From 94d8277c3ad5a092ee520ba88b0bb87e1313fd6b Mon Sep 17 00:00:00 2001 From: Owl Bot Date: Tue, 17 Oct 2023 20:02:41 +0000 Subject: [PATCH] chore: Update gapic-generator-python to v1.11.8 PiperOrigin-RevId: 574178735 Source-Link: https://github.com/googleapis/googleapis/commit/7307199008ee2d57a4337066de29f9cd8c444bc6 Source-Link: https://github.com/googleapis/googleapis-gen/commit/ce3af21b7c559a87c2befc076be0e3aeda3a26f0 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2UzYWYyMWI3YzU1OWE4N2MyYmVmYzA3NmJlMGUzYWVkYTNhMjZmMCJ9 --- owl-bot-staging/bigtable/v2/.coveragerc | 13 + owl-bot-staging/bigtable/v2/.flake8 | 33 + owl-bot-staging/bigtable/v2/MANIFEST.in | 2 + owl-bot-staging/bigtable/v2/README.rst | 49 + .../bigtable/v2/docs/_static/custom.css | 3 + .../bigtable/v2/docs/bigtable_v2/bigtable.rst | 6 + .../bigtable/v2/docs/bigtable_v2/services.rst | 6 + .../bigtable/v2/docs/bigtable_v2/types.rst | 6 + owl-bot-staging/bigtable/v2/docs/conf.py | 376 + owl-bot-staging/bigtable/v2/docs/index.rst | 7 + .../v2/google/cloud/bigtable/__init__.py | 107 + .../v2/google/cloud/bigtable/gapic_version.py | 16 + .../v2/google/cloud/bigtable/py.typed | 2 + .../v2/google/cloud/bigtable_v2/__init__.py | 108 + .../cloud/bigtable_v2/gapic_metadata.json | 163 + .../google/cloud/bigtable_v2/gapic_version.py | 16 + .../v2/google/cloud/bigtable_v2/py.typed | 2 + .../cloud/bigtable_v2/services/__init__.py | 15 + .../bigtable_v2/services/bigtable/__init__.py | 22 + .../services/bigtable/async_client.py | 1160 ++ .../bigtable_v2/services/bigtable/client.py | 1415 ++ .../services/bigtable/transports/__init__.py | 38 + .../services/bigtable/transports/base.py | 272 + .../services/bigtable/transports/grpc.py | 501 + .../bigtable/transports/grpc_asyncio.py | 500 + .../services/bigtable/transports/rest.py | 1261 ++ .../cloud/bigtable_v2/types/__init__.py | 108 + .../cloud/bigtable_v2/types/bigtable.py | 1186 ++ .../v2/google/cloud/bigtable_v2/types/data.py | 1102 ++ .../cloud/bigtable_v2/types/feature_flags.py | 82 + .../cloud/bigtable_v2/types/request_stats.py | 171 + .../bigtable_v2/types/response_params.py | 64 + owl-bot-staging/bigtable/v2/mypy.ini | 3 + owl-bot-staging/bigtable/v2/noxfile.py | 184 + .../v2/scripts/fixup_bigtable_v2_keywords.py | 184 + owl-bot-staging/bigtable/v2/setup.py | 90 + .../bigtable/v2/testing/constraints-3.10.txt | 6 + .../bigtable/v2/testing/constraints-3.11.txt | 6 + .../bigtable/v2/testing/constraints-3.12.txt | 6 + .../bigtable/v2/testing/constraints-3.7.txt | 9 + .../bigtable/v2/testing/constraints-3.8.txt | 6 + .../bigtable/v2/testing/constraints-3.9.txt | 6 + owl-bot-staging/bigtable/v2/tests/__init__.py | 16 + .../bigtable/v2/tests/unit/__init__.py | 16 + .../bigtable/v2/tests/unit/gapic/__init__.py | 16 + .../tests/unit/gapic/bigtable_v2/__init__.py | 16 + .../unit/gapic/bigtable_v2/test_bigtable.py | 5644 ++++++ owl-bot-staging/bigtable_admin/v2/.coveragerc | 13 + owl-bot-staging/bigtable_admin/v2/.flake8 | 33 + owl-bot-staging/bigtable_admin/v2/MANIFEST.in | 2 + owl-bot-staging/bigtable_admin/v2/README.rst | 49 + .../bigtable_admin/v2/docs/_static/custom.css | 3 + .../bigtable_instance_admin.rst | 10 + .../bigtable_table_admin.rst | 10 + .../v2/docs/bigtable_admin_v2/services.rst | 7 + .../v2/docs/bigtable_admin_v2/types.rst | 6 + .../bigtable_admin/v2/docs/conf.py | 376 + .../bigtable_admin/v2/docs/index.rst | 7 + .../google/cloud/bigtable_admin/__init__.py | 189 + .../cloud/bigtable_admin/gapic_version.py | 16 + .../v2/google/cloud/bigtable_admin/py.typed | 2 + 
.../cloud/bigtable_admin_v2/__init__.py | 190 + .../bigtable_admin_v2/gapic_metadata.json | 737 + .../cloud/bigtable_admin_v2/gapic_version.py | 16 + .../google/cloud/bigtable_admin_v2/py.typed | 2 + .../bigtable_admin_v2/services/__init__.py | 15 + .../bigtable_instance_admin/__init__.py | 22 + .../bigtable_instance_admin/async_client.py | 2168 +++ .../bigtable_instance_admin/client.py | 2326 +++ .../bigtable_instance_admin/pagers.py | 261 + .../transports/__init__.py | 38 + .../transports/base.py | 536 + .../transports/grpc.py | 849 + .../transports/grpc_asyncio.py | 848 + .../transports/rest.py | 2776 +++ .../services/bigtable_table_admin/__init__.py | 22 + .../bigtable_table_admin/async_client.py | 2645 +++ .../services/bigtable_table_admin/client.py | 2825 +++ .../services/bigtable_table_admin/pagers.py | 382 + .../transports/__init__.py | 38 + .../bigtable_table_admin/transports/base.py | 571 + .../bigtable_table_admin/transports/grpc.py | 996 ++ .../transports/grpc_asyncio.py | 995 ++ .../bigtable_table_admin/transports/rest.py | 3310 ++++ .../cloud/bigtable_admin_v2/types/__init__.py | 186 + .../types/bigtable_instance_admin.py | 891 + .../types/bigtable_table_admin.py | 1364 ++ .../cloud/bigtable_admin_v2/types/common.py | 81 + .../cloud/bigtable_admin_v2/types/instance.py | 620 + .../cloud/bigtable_admin_v2/types/table.py | 727 + owl-bot-staging/bigtable_admin/v2/mypy.ini | 3 + owl-bot-staging/bigtable_admin/v2/noxfile.py | 184 + .../fixup_bigtable_admin_v2_keywords.py | 218 + owl-bot-staging/bigtable_admin/v2/setup.py | 91 + .../v2/testing/constraints-3.10.txt | 7 + .../v2/testing/constraints-3.11.txt | 7 + .../v2/testing/constraints-3.12.txt | 7 + .../v2/testing/constraints-3.7.txt | 10 + .../v2/testing/constraints-3.8.txt | 7 + .../v2/testing/constraints-3.9.txt | 7 + .../bigtable_admin/v2/tests/__init__.py | 16 + .../bigtable_admin/v2/tests/unit/__init__.py | 16 + .../v2/tests/unit/gapic/__init__.py | 16 + .../unit/gapic/bigtable_admin_v2/__init__.py | 16 + .../test_bigtable_instance_admin.py | 12025 +++++++++++++ .../test_bigtable_table_admin.py | 14140 ++++++++++++++++ 106 files changed, 68950 insertions(+) create mode 100644 owl-bot-staging/bigtable/v2/.coveragerc create mode 100644 owl-bot-staging/bigtable/v2/.flake8 create mode 100644 owl-bot-staging/bigtable/v2/MANIFEST.in create mode 100644 owl-bot-staging/bigtable/v2/README.rst create mode 100644 owl-bot-staging/bigtable/v2/docs/_static/custom.css create mode 100644 owl-bot-staging/bigtable/v2/docs/bigtable_v2/bigtable.rst create mode 100644 owl-bot-staging/bigtable/v2/docs/bigtable_v2/services.rst create mode 100644 owl-bot-staging/bigtable/v2/docs/bigtable_v2/types.rst create mode 100644 owl-bot-staging/bigtable/v2/docs/conf.py create mode 100644 owl-bot-staging/bigtable/v2/docs/index.rst create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable/__init__.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable/gapic_version.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable/py.typed create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/__init__.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_metadata.json create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_version.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/py.typed create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/__init__.py create mode 100644 
owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/__init__.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/async_client.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/client.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/base.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/rest.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/__init__.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/bigtable.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/data.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/feature_flags.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/request_stats.py create mode 100644 owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/response_params.py create mode 100644 owl-bot-staging/bigtable/v2/mypy.ini create mode 100644 owl-bot-staging/bigtable/v2/noxfile.py create mode 100644 owl-bot-staging/bigtable/v2/scripts/fixup_bigtable_v2_keywords.py create mode 100644 owl-bot-staging/bigtable/v2/setup.py create mode 100644 owl-bot-staging/bigtable/v2/testing/constraints-3.10.txt create mode 100644 owl-bot-staging/bigtable/v2/testing/constraints-3.11.txt create mode 100644 owl-bot-staging/bigtable/v2/testing/constraints-3.12.txt create mode 100644 owl-bot-staging/bigtable/v2/testing/constraints-3.7.txt create mode 100644 owl-bot-staging/bigtable/v2/testing/constraints-3.8.txt create mode 100644 owl-bot-staging/bigtable/v2/testing/constraints-3.9.txt create mode 100644 owl-bot-staging/bigtable/v2/tests/__init__.py create mode 100644 owl-bot-staging/bigtable/v2/tests/unit/__init__.py create mode 100644 owl-bot-staging/bigtable/v2/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/__init__.py create mode 100644 owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/test_bigtable.py create mode 100644 owl-bot-staging/bigtable_admin/v2/.coveragerc create mode 100644 owl-bot-staging/bigtable_admin/v2/.flake8 create mode 100644 owl-bot-staging/bigtable_admin/v2/MANIFEST.in create mode 100644 owl-bot-staging/bigtable_admin/v2/README.rst create mode 100644 owl-bot-staging/bigtable_admin/v2/docs/_static/custom.css create mode 100644 owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_instance_admin.rst create mode 100644 owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_table_admin.rst create mode 100644 owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/services.rst create mode 100644 owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/types.rst create mode 100644 owl-bot-staging/bigtable_admin/v2/docs/conf.py create mode 100644 owl-bot-staging/bigtable_admin/v2/docs/index.rst create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/gapic_version.py create mode 100644 
owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/py.typed create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_metadata.json create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_version.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/py.typed create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/common.py create mode 100644 owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/instance.py create mode 100644 
owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/table.py create mode 100644 owl-bot-staging/bigtable_admin/v2/mypy.ini create mode 100644 owl-bot-staging/bigtable_admin/v2/noxfile.py create mode 100644 owl-bot-staging/bigtable_admin/v2/scripts/fixup_bigtable_admin_v2_keywords.py create mode 100644 owl-bot-staging/bigtable_admin/v2/setup.py create mode 100644 owl-bot-staging/bigtable_admin/v2/testing/constraints-3.10.txt create mode 100644 owl-bot-staging/bigtable_admin/v2/testing/constraints-3.11.txt create mode 100644 owl-bot-staging/bigtable_admin/v2/testing/constraints-3.12.txt create mode 100644 owl-bot-staging/bigtable_admin/v2/testing/constraints-3.7.txt create mode 100644 owl-bot-staging/bigtable_admin/v2/testing/constraints-3.8.txt create mode 100644 owl-bot-staging/bigtable_admin/v2/testing/constraints-3.9.txt create mode 100644 owl-bot-staging/bigtable_admin/v2/tests/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/tests/unit/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/__init__.py create mode 100644 owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py create mode 100644 owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py diff --git a/owl-bot-staging/bigtable/v2/.coveragerc b/owl-bot-staging/bigtable/v2/.coveragerc new file mode 100644 index 000000000..9b4f2d9d5 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/.coveragerc @@ -0,0 +1,13 @@ +[run] +branch = True + +[report] +show_missing = True +omit = + google/cloud/bigtable/__init__.py + google/cloud/bigtable/gapic_version.py +exclude_lines = + # Re-enable the standard pragma + pragma: NO COVER + # Ignore debug-only repr + def __repr__ diff --git a/owl-bot-staging/bigtable/v2/.flake8 b/owl-bot-staging/bigtable/v2/.flake8 new file mode 100644 index 000000000..29227d4cf --- /dev/null +++ b/owl-bot-staging/bigtable/v2/.flake8 @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Generated by synthtool. DO NOT EDIT! +[flake8] +ignore = E203, E266, E501, W503 +exclude = + # Exclude generated code. + **/proto/** + **/gapic/** + **/services/** + **/types/** + *_pb2.py + + # Standard linting exemptions. 
+ **/.nox/** + __pycache__, + .git, + *.pyc, + conf.py diff --git a/owl-bot-staging/bigtable/v2/MANIFEST.in b/owl-bot-staging/bigtable/v2/MANIFEST.in new file mode 100644 index 000000000..9a3ef5517 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/MANIFEST.in @@ -0,0 +1,2 @@ +recursive-include google/cloud/bigtable *.py +recursive-include google/cloud/bigtable_v2 *.py diff --git a/owl-bot-staging/bigtable/v2/README.rst b/owl-bot-staging/bigtable/v2/README.rst new file mode 100644 index 000000000..d36a3eb74 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/README.rst @@ -0,0 +1,49 @@ +Python Client for Google Cloud Bigtable API +================================================= + +Quick Start +----------- + +In order to use this library, you first need to go through the following steps: + +1. `Select or create a Cloud Platform project.`_ +2. `Enable billing for your project.`_ +3. Enable the Google Cloud Bigtable API. +4. `Setup Authentication.`_ + +.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project +.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project +.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html + +Installation +~~~~~~~~~~~~ + +Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to +create isolated Python environments. The basic problem it addresses is one of +dependencies and versions, and indirectly permissions. + +With `virtualenv`_, it's possible to install this library without needing system +install permissions, and without clashing with the installed system +dependencies. + +.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ + + +Mac/Linux +^^^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + source <your-env>/bin/activate + <your-env>/bin/pip install /path/to/library + + +Windows +^^^^^^^ + +.. code-block:: console + + python3 -m venv <your-env> + <your-env>\Scripts\activate + <your-env>\Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/bigtable/v2/docs/_static/custom.css b/owl-bot-staging/bigtable/v2/docs/_static/custom.css new file mode 100644 index 000000000..06423be0b --- /dev/null +++ b/owl-bot-staging/bigtable/v2/docs/_static/custom.css @@ -0,0 +1,3 @@ +dl.field-list > dt { + min-width: 100px +} diff --git a/owl-bot-staging/bigtable/v2/docs/bigtable_v2/bigtable.rst b/owl-bot-staging/bigtable/v2/docs/bigtable_v2/bigtable.rst new file mode 100644 index 000000000..9f92e0fee --- /dev/null +++ b/owl-bot-staging/bigtable/v2/docs/bigtable_v2/bigtable.rst @@ -0,0 +1,6 @@ +Bigtable +-------------------------- + +.. automodule:: google.cloud.bigtable_v2.services.bigtable + :members: + :inherited-members: diff --git a/owl-bot-staging/bigtable/v2/docs/bigtable_v2/services.rst b/owl-bot-staging/bigtable/v2/docs/bigtable_v2/services.rst new file mode 100644 index 000000000..1de472763 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/docs/bigtable_v2/services.rst @@ -0,0 +1,6 @@ +Services for Google Cloud Bigtable v2 API +========================================= +.. toctree:: + :maxdepth: 2 + + bigtable diff --git a/owl-bot-staging/bigtable/v2/docs/bigtable_v2/types.rst b/owl-bot-staging/bigtable/v2/docs/bigtable_v2/types.rst new file mode 100644 index 000000000..56a8941a2 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/docs/bigtable_v2/types.rst @@ -0,0 +1,6 @@ +Types for Google Cloud Bigtable v2 API +====================================== + +..
automodule:: google.cloud.bigtable_v2.types + :members: + :show-inheritance: diff --git a/owl-bot-staging/bigtable/v2/docs/conf.py b/owl-bot-staging/bigtable/v2/docs/conf.py new file mode 100644 index 000000000..79859d39b --- /dev/null +++ b/owl-bot-staging/bigtable/v2/docs/conf.py @@ -0,0 +1,376 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# +# google-cloud-bigtable documentation build configuration file +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +import sys +import os +import shlex + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +sys.path.insert(0, os.path.abspath("..")) + +__version__ = "0.1.0" + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +needs_sphinx = "4.0.1" + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.autosummary", + "sphinx.ext.intersphinx", + "sphinx.ext.coverage", + "sphinx.ext.napoleon", + "sphinx.ext.todo", + "sphinx.ext.viewcode", +] + +# autodoc/autosummary flags +autoclass_content = "both" +autodoc_default_flags = ["members"] +autosummary_generate = True + + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# Allow markdown includes (so releases.md can include CHANGELOG.md) +# http://www.sphinx-doc.org/en/master/markdown.html +source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} + +# The suffix(es) of source filenames. +# You can specify multiple suffixes as a list of strings: +source_suffix = [".rst", ".md"] + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The root toctree document. +root_doc = "index" + +# General information about the project. +project = u"google-cloud-bigtable" +copyright = u"2023, Google, LLC" +author = u"Google APIs" # TODO: autogenerate this bit + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The full version, including alpha/beta/rc tags. +release = __version__ +# The short X.Y version. +version = ".".join(release.split(".")[0:2]) + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases. +language = 'en' + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = ["_build"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +html_theme_options = { + "description": "Google Cloud Client Libraries for Python", + "github_user": "googleapis", + "github_repo": "google-cloud-python", + "github_banner": True, + "font_family": "'Roboto', Georgia, sans", + "head_font_family": "'Roboto', Georgia, serif", + "code_font_family": "'Roboto Mono', 'Consolas', monospace", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# "<project> v<release> documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ["_static"] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities.
+# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a <link> tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. +# Sphinx supports the following languages: +# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' +# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' +# html_search_language = 'en' + +# A dictionary with options for the search language support, empty by default. +# Now only 'ja' uses this config value +# html_search_options = {'type': 'default'} + +# The name of a javascript file (relative to the configuration directory) that +# implements a search results scorer. If empty, the default will be used. +# html_search_scorer = 'scorer.js' + +# Output file base name for HTML help builder. +htmlhelp_basename = "google-cloud-bigtable-doc" + +# -- Options for warnings ------------------------------------------------------ + + +suppress_warnings = [ + # Temporarily suppress this to avoid "more than one target found for + # cross-reference" warnings, which are intractable for us to avoid while in + # a mono-repo. + # See https://github.com/sphinx-doc/sphinx/blob + # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 + "ref.python" +] + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # 'papersize': 'letterpaper', + # The font size ('10pt', '11pt' or '12pt'). + # 'pointsize': '10pt', + # Additional stuff for the LaTeX preamble. + # 'preamble': '', + # Latex figure (float) alignment + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + root_doc, + "google-cloud-bigtable.tex", + u"google-cloud-bigtable Documentation", + author, + "manual", + ) +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated.
+# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + root_doc, + "google-cloud-bigtable", + u"Google Cloud Bigtable Documentation", + [author], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + root_doc, + "google-cloud-bigtable", + u"google-cloud-bigtable Documentation", + author, + "google-cloud-bigtable", + "GAPIC library for Google Cloud Bigtable API", + "APIs", + ) +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + "python": ("http://python.readthedocs.org/en/latest/", None), + "gax": ("https://gax-python.readthedocs.org/en/latest/", None), + "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), + "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), + "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), + "grpc": ("https://grpc.io/grpc/python/", None), + "requests": ("http://requests.kennethreitz.org/en/stable/", None), + "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), + "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), +} + + +# Napoleon settings +napoleon_google_docstring = True +napoleon_numpy_docstring = True +napoleon_include_private_with_doc = False +napoleon_include_special_with_doc = True +napoleon_use_admonition_for_examples = False +napoleon_use_admonition_for_notes = False +napoleon_use_admonition_for_references = False +napoleon_use_ivar = False +napoleon_use_param = True +napoleon_use_rtype = True diff --git a/owl-bot-staging/bigtable/v2/docs/index.rst b/owl-bot-staging/bigtable/v2/docs/index.rst new file mode 100644 index 000000000..10a273382 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/docs/index.rst @@ -0,0 +1,7 @@ +API Reference +------------- +.. toctree:: + :maxdepth: 2 + + bigtable_v2/services + bigtable_v2/types diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable/__init__.py new file mode 100644 index 000000000..d36716cc2 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable/__init__.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from google.cloud.bigtable import gapic_version as package_version + +__version__ = package_version.__version__ + + +from google.cloud.bigtable_v2.services.bigtable.client import BigtableClient +from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient + +from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowResponse +from google.cloud.bigtable_v2.types.bigtable import GenerateInitialChangeStreamPartitionsRequest +from google.cloud.bigtable_v2.types.bigtable import GenerateInitialChangeStreamPartitionsResponse +from google.cloud.bigtable_v2.types.bigtable import MutateRowRequest +from google.cloud.bigtable_v2.types.bigtable import MutateRowResponse +from google.cloud.bigtable_v2.types.bigtable import MutateRowsRequest +from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse +from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest +from google.cloud.bigtable_v2.types.bigtable import PingAndWarmResponse +from google.cloud.bigtable_v2.types.bigtable import RateLimitInfo +from google.cloud.bigtable_v2.types.bigtable import ReadChangeStreamRequest +from google.cloud.bigtable_v2.types.bigtable import ReadChangeStreamResponse +from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowRequest +from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowResponse +from google.cloud.bigtable_v2.types.bigtable import ReadRowsRequest +from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse +from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysRequest +from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysResponse +from google.cloud.bigtable_v2.types.data import Cell +from google.cloud.bigtable_v2.types.data import Column +from google.cloud.bigtable_v2.types.data import ColumnRange +from google.cloud.bigtable_v2.types.data import Family +from google.cloud.bigtable_v2.types.data import Mutation +from google.cloud.bigtable_v2.types.data import ReadModifyWriteRule +from google.cloud.bigtable_v2.types.data import Row +from google.cloud.bigtable_v2.types.data import RowFilter +from google.cloud.bigtable_v2.types.data import RowRange +from google.cloud.bigtable_v2.types.data import RowSet +from google.cloud.bigtable_v2.types.data import StreamContinuationToken +from google.cloud.bigtable_v2.types.data import StreamContinuationTokens +from google.cloud.bigtable_v2.types.data import StreamPartition +from google.cloud.bigtable_v2.types.data import TimestampRange +from google.cloud.bigtable_v2.types.data import ValueRange +from google.cloud.bigtable_v2.types.feature_flags import FeatureFlags +from google.cloud.bigtable_v2.types.request_stats import FullReadStatsView +from google.cloud.bigtable_v2.types.request_stats import ReadIterationStats +from google.cloud.bigtable_v2.types.request_stats import RequestLatencyStats +from google.cloud.bigtable_v2.types.request_stats import RequestStats +from google.cloud.bigtable_v2.types.response_params import ResponseParams + +__all__ = ('BigtableClient', + 'BigtableAsyncClient', + 'CheckAndMutateRowRequest', + 'CheckAndMutateRowResponse', + 'GenerateInitialChangeStreamPartitionsRequest', + 'GenerateInitialChangeStreamPartitionsResponse', + 'MutateRowRequest', + 'MutateRowResponse', + 'MutateRowsRequest', + 'MutateRowsResponse', + 'PingAndWarmRequest', + 
'PingAndWarmResponse', + 'RateLimitInfo', + 'ReadChangeStreamRequest', + 'ReadChangeStreamResponse', + 'ReadModifyWriteRowRequest', + 'ReadModifyWriteRowResponse', + 'ReadRowsRequest', + 'ReadRowsResponse', + 'SampleRowKeysRequest', + 'SampleRowKeysResponse', + 'Cell', + 'Column', + 'ColumnRange', + 'Family', + 'Mutation', + 'ReadModifyWriteRule', + 'Row', + 'RowFilter', + 'RowRange', + 'RowSet', + 'StreamContinuationToken', + 'StreamContinuationTokens', + 'StreamPartition', + 'TimestampRange', + 'ValueRange', + 'FeatureFlags', + 'FullReadStatsView', + 'ReadIterationStats', + 'RequestLatencyStats', + 'RequestStats', + 'ResponseParams', +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable/gapic_version.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable/gapic_version.py new file mode 100644 index 000000000..360a0d13e --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable/py.typed b/owl-bot-staging/bigtable/v2/google/cloud/bigtable/py.typed new file mode 100644 index 000000000..889d34043 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable package uses inline types. diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/__init__.py new file mode 100644 index 000000000..7383df7c5 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/__init__.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from google.cloud.bigtable_v2 import gapic_version as package_version + +__version__ = package_version.__version__ + + +from .services.bigtable import BigtableClient +from .services.bigtable import BigtableAsyncClient + +from .types.bigtable import CheckAndMutateRowRequest +from .types.bigtable import CheckAndMutateRowResponse +from .types.bigtable import GenerateInitialChangeStreamPartitionsRequest +from .types.bigtable import GenerateInitialChangeStreamPartitionsResponse +from .types.bigtable import MutateRowRequest +from .types.bigtable import MutateRowResponse +from .types.bigtable import MutateRowsRequest +from .types.bigtable import MutateRowsResponse +from .types.bigtable import PingAndWarmRequest +from .types.bigtable import PingAndWarmResponse +from .types.bigtable import RateLimitInfo +from .types.bigtable import ReadChangeStreamRequest +from .types.bigtable import ReadChangeStreamResponse +from .types.bigtable import ReadModifyWriteRowRequest +from .types.bigtable import ReadModifyWriteRowResponse +from .types.bigtable import ReadRowsRequest +from .types.bigtable import ReadRowsResponse +from .types.bigtable import SampleRowKeysRequest +from .types.bigtable import SampleRowKeysResponse +from .types.data import Cell +from .types.data import Column +from .types.data import ColumnRange +from .types.data import Family +from .types.data import Mutation +from .types.data import ReadModifyWriteRule +from .types.data import Row +from .types.data import RowFilter +from .types.data import RowRange +from .types.data import RowSet +from .types.data import StreamContinuationToken +from .types.data import StreamContinuationTokens +from .types.data import StreamPartition +from .types.data import TimestampRange +from .types.data import ValueRange +from .types.feature_flags import FeatureFlags +from .types.request_stats import FullReadStatsView +from .types.request_stats import ReadIterationStats +from .types.request_stats import RequestLatencyStats +from .types.request_stats import RequestStats +from .types.response_params import ResponseParams + +__all__ = ( + 'BigtableAsyncClient', +'BigtableClient', +'Cell', +'CheckAndMutateRowRequest', +'CheckAndMutateRowResponse', +'Column', +'ColumnRange', +'Family', +'FeatureFlags', +'FullReadStatsView', +'GenerateInitialChangeStreamPartitionsRequest', +'GenerateInitialChangeStreamPartitionsResponse', +'MutateRowRequest', +'MutateRowResponse', +'MutateRowsRequest', +'MutateRowsResponse', +'Mutation', +'PingAndWarmRequest', +'PingAndWarmResponse', +'RateLimitInfo', +'ReadChangeStreamRequest', +'ReadChangeStreamResponse', +'ReadIterationStats', +'ReadModifyWriteRowRequest', +'ReadModifyWriteRowResponse', +'ReadModifyWriteRule', +'ReadRowsRequest', +'ReadRowsResponse', +'RequestLatencyStats', +'RequestStats', +'ResponseParams', +'Row', +'RowFilter', +'RowRange', +'RowSet', +'SampleRowKeysRequest', +'SampleRowKeysResponse', +'StreamContinuationToken', +'StreamContinuationTokens', +'StreamPartition', +'TimestampRange', +'ValueRange', +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_metadata.json b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_metadata.json new file mode 100644 index 000000000..181dc8ff5 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_metadata.json @@ -0,0 +1,163 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.bigtable_v2", + "protoPackage": 
"google.bigtable.v2", + "schema": "1.0", + "services": { + "Bigtable": { + "clients": { + "grpc": { + "libraryClient": "BigtableClient", + "rpcs": { + "CheckAndMutateRow": { + "methods": [ + "check_and_mutate_row" + ] + }, + "GenerateInitialChangeStreamPartitions": { + "methods": [ + "generate_initial_change_stream_partitions" + ] + }, + "MutateRow": { + "methods": [ + "mutate_row" + ] + }, + "MutateRows": { + "methods": [ + "mutate_rows" + ] + }, + "PingAndWarm": { + "methods": [ + "ping_and_warm" + ] + }, + "ReadChangeStream": { + "methods": [ + "read_change_stream" + ] + }, + "ReadModifyWriteRow": { + "methods": [ + "read_modify_write_row" + ] + }, + "ReadRows": { + "methods": [ + "read_rows" + ] + }, + "SampleRowKeys": { + "methods": [ + "sample_row_keys" + ] + } + } + }, + "grpc-async": { + "libraryClient": "BigtableAsyncClient", + "rpcs": { + "CheckAndMutateRow": { + "methods": [ + "check_and_mutate_row" + ] + }, + "GenerateInitialChangeStreamPartitions": { + "methods": [ + "generate_initial_change_stream_partitions" + ] + }, + "MutateRow": { + "methods": [ + "mutate_row" + ] + }, + "MutateRows": { + "methods": [ + "mutate_rows" + ] + }, + "PingAndWarm": { + "methods": [ + "ping_and_warm" + ] + }, + "ReadChangeStream": { + "methods": [ + "read_change_stream" + ] + }, + "ReadModifyWriteRow": { + "methods": [ + "read_modify_write_row" + ] + }, + "ReadRows": { + "methods": [ + "read_rows" + ] + }, + "SampleRowKeys": { + "methods": [ + "sample_row_keys" + ] + } + } + }, + "rest": { + "libraryClient": "BigtableClient", + "rpcs": { + "CheckAndMutateRow": { + "methods": [ + "check_and_mutate_row" + ] + }, + "GenerateInitialChangeStreamPartitions": { + "methods": [ + "generate_initial_change_stream_partitions" + ] + }, + "MutateRow": { + "methods": [ + "mutate_row" + ] + }, + "MutateRows": { + "methods": [ + "mutate_rows" + ] + }, + "PingAndWarm": { + "methods": [ + "ping_and_warm" + ] + }, + "ReadChangeStream": { + "methods": [ + "read_change_stream" + ] + }, + "ReadModifyWriteRow": { + "methods": [ + "read_modify_write_row" + ] + }, + "ReadRows": { + "methods": [ + "read_rows" + ] + }, + "SampleRowKeys": { + "methods": [ + "sample_row_keys" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_version.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_version.py new file mode 100644 index 000000000..360a0d13e --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/py.typed b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/py.typed new file mode 100644 index 000000000..889d34043 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable package uses inline types. 
diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/__init__.py new file mode 100644 index 000000000..89a37dc92 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/__init__.py new file mode 100644 index 000000000..749087607 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import BigtableClient +from .async_client import BigtableAsyncClient + +__all__ = ( + 'BigtableClient', + 'BigtableAsyncClient', +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/async_client.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/async_client.py new file mode 100644 index 000000000..c1f68087f --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -0,0 +1,1160 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union + +from google.cloud.bigtable_v2 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.cloud.bigtable_v2.types import bigtable +from google.cloud.bigtable_v2.types import data +from google.cloud.bigtable_v2.types import request_stats +from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport +from .client import BigtableClient + + +class BigtableAsyncClient: + """Service for reading from and writing to existing Bigtable + tables. + """ + + _client: BigtableClient + + DEFAULT_ENDPOINT = BigtableClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BigtableClient.DEFAULT_MTLS_ENDPOINT + + instance_path = staticmethod(BigtableClient.instance_path) + parse_instance_path = staticmethod(BigtableClient.parse_instance_path) + table_path = staticmethod(BigtableClient.table_path) + parse_table_path = staticmethod(BigtableClient.parse_table_path) + common_billing_account_path = staticmethod(BigtableClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(BigtableClient.parse_common_billing_account_path) + common_folder_path = staticmethod(BigtableClient.common_folder_path) + parse_common_folder_path = staticmethod(BigtableClient.parse_common_folder_path) + common_organization_path = staticmethod(BigtableClient.common_organization_path) + parse_common_organization_path = staticmethod(BigtableClient.parse_common_organization_path) + common_project_path = staticmethod(BigtableClient.common_project_path) + parse_common_project_path = staticmethod(BigtableClient.parse_common_project_path) + common_location_path = staticmethod(BigtableClient.common_location_path) + parse_common_location_path = staticmethod(BigtableClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableAsyncClient: The constructed client. + """ + return BigtableClient.from_service_account_info.__func__(BigtableAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableAsyncClient: The constructed client. 
+ """ + return BigtableClient.from_service_account_file.__func__(BigtableAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return BigtableClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> BigtableTransport: + """Returns the transport used by the client instance. + + Returns: + BigtableTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(BigtableClient).get_transport_class, type(BigtableClient)) + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, BigtableTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the bigtable client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.BigtableTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. 
+ (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+ is "true", then the ``client_cert_source`` property can be used
+ to provide client certificate for mutual TLS transport. If
+ not provided, the default SSL client certificate will be used if
+ present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+ set, no client certificate will be used.
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ self._client = BigtableClient(
+ credentials=credentials,
+ transport=transport,
+ client_options=client_options,
+ client_info=client_info,
+
+ )
+
+ def read_rows(self,
+ request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None,
+ *,
+ table_name: Optional[str] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]:
+ r"""Streams back the contents of all requested rows in
+ key order, optionally applying the same Reader filter to
+ each. Depending on their size, rows and cells may be
+ broken up across multiple responses, but atomicity of
+ each row will still be preserved. See the
+ ReadRowsResponse documentation for details.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]]):
+ The request object. Request message for
+ Bigtable.ReadRows.
+ table_name (:class:`str`):
+ Required. The unique name of the table from which to
+ read. Values are of the form
+ ``projects/<project>/instances/<instance>/tables/<table>``.
+
+ This corresponds to the ``table_name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ app_profile_id (:class:`str`):
+ This value specifies routing for
+ replication. If not specified, the
+ "default" application profile will be
+ used.
+
+ This corresponds to the ``app_profile_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ AsyncIterable[google.cloud.bigtable_v2.types.ReadRowsResponse]:
+ Response message for
+ Bigtable.ReadRows.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([table_name, app_profile_id])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable.ReadRowsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if table_name is not None:
+ request.table_name = table_name
+ if app_profile_id is not None:
+ request.app_profile_id = app_profile_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.read_rows,
+ default_timeout=43200.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
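# Illustrative usage sketch (not part of the generated patch): constructing the
# async client and streaming rows. Assumes Application Default Credentials and
# the usual GAPIC top-level re-exports from google.cloud.bigtable_v2; the
# project/instance/table names are placeholders.
import asyncio
from google.cloud.bigtable_v2 import BigtableAsyncClient
from google.cloud.bigtable_v2.types import bigtable

async def main():
    # Transport defaults to "grpc_asyncio"; credentials come from the environment.
    client = BigtableAsyncClient()
    request = bigtable.ReadRowsRequest(
        table_name="projects/my-project/instances/my-instance/tables/my-table",
    )
    # Server-streaming call: awaiting it yields an async iterable of responses.
    stream = await client.read_rows(request=request)
    async for response in stream:
        for chunk in response.chunks:  # a row's cells may span several chunks
            print(chunk.row_key, chunk.value)

asyncio.run(main())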
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("table_name", request.table_name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def sample_row_keys(self,
+ request: Optional[Union[bigtable.SampleRowKeysRequest, dict]] = None,
+ *,
+ table_name: Optional[str] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]:
+ r"""Returns a sample of row keys in the table. The
+ returned row keys will delimit contiguous sections of
+ the table of approximately equal size, which can be used
+ to break up the data for distributed tasks like
+ mapreduces.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]]):
+ The request object. Request message for
+ Bigtable.SampleRowKeys.
+ table_name (:class:`str`):
+ Required. The unique name of the table from which to
+ sample row keys. Values are of the form
+ ``projects/<project>/instances/<instance>/tables/<table>``.
+
+ This corresponds to the ``table_name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ app_profile_id (:class:`str`):
+ This value specifies routing for
+ replication. If not specified, the
+ "default" application profile will be
+ used.
+
+ This corresponds to the ``app_profile_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ AsyncIterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]:
+ Response message for
+ Bigtable.SampleRowKeys.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([table_name, app_profile_id])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable.SampleRowKeysRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if table_name is not None:
+ request.table_name = table_name
+ if app_profile_id is not None:
+ request.app_profile_id = app_profile_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.sample_row_keys,
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("table_name", request.table_name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def mutate_row(self,
+ request: Optional[Union[bigtable.MutateRowRequest, dict]] = None,
+ *,
+ table_name: Optional[str] = None,
+ row_key: Optional[bytes] = None,
+ mutations: Optional[MutableSequence[data.Mutation]] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> bigtable.MutateRowResponse:
+ r"""Mutates a row atomically. Cells already present in the row are
+ left unchanged unless explicitly changed by ``mutation``.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_v2.types.MutateRowRequest, dict]]):
+ The request object. Request message for
+ Bigtable.MutateRow.
+ table_name (:class:`str`):
+ Required. The unique name of the table to which the
+ mutation should be applied. Values are of the form
+ ``projects/<project>/instances/<instance>/tables/<table>
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (:class:`bytes`): + Required. The key of the row to which + the mutation should be applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + mutations (:class:`MutableSequence[google.cloud.bigtable_v2.types.Mutation]`): + Required. Changes to be atomically + applied to the specified row. Entries + are applied in order, meaning that + earlier mutations can be masked by later + ones. Must contain at least one entry + and at most 100000. + + This corresponds to the ``mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.MutateRowResponse: + Response message for + Bigtable.MutateRow. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable.MutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + if mutations: + request.mutations.extend(mutations) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_row, + default_retry=retries.Retry( +initial=0.01,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table_name", request.table_name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
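# Illustrative sketch (not part of the patch): a single atomic MutateRow call
# via the flattened parameters, reusing the async `client` from the first
# sketch inside the same async context. Family/qualifier/values are placeholders.
from google.cloud.bigtable_v2.types import data

response = await client.mutate_row(
    table_name="projects/my-project/instances/my-instance/tables/my-table",
    row_key=b"user#1234",
    mutations=[
        data.Mutation(set_cell=data.Mutation.SetCell(
            family_name="stats",
            column_qualifier=b"visits",
            timestamp_micros=-1,  # -1 asks the server to assign its current time
            value=b"\x00\x00\x00\x00\x00\x00\x00\x01",
        )),
    ],
)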
+ return response + + def mutate_rows(self, + request: Optional[Union[bigtable.MutateRowsRequest, dict]] = None, + *, + table_name: Optional[str] = None, + entries: Optional[MutableSequence[bigtable.MutateRowsRequest.Entry]] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: + r"""Mutates multiple rows in a batch. Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Args: + request (Optional[Union[google.cloud.bigtable_v2.types.MutateRowsRequest, dict]]): + The request object. Request message for + BigtableService.MutateRows. + table_name (:class:`str`): + Required. The unique name of the + table to which the mutations should be + applied. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entries (:class:`MutableSequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]`): + Required. The row keys and + corresponding mutations to be applied in + bulk. Each entry is applied as an atomic + mutation, but the entries may be applied + in arbitrary order (even between entries + for the same row). At least one entry + must be specified, and in total the + entries can contain at most 100000 + mutations. + + This corresponds to the ``entries`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: + Response message for + BigtableService.MutateRows. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, entries, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable.MutateRowsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + if entries: + request.entries.extend(entries) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.mutate_rows, + default_timeout=600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table_name", request.table_name), + )), + ) + + # Send the request. 
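# Sketch (not part of the patch): bulk writes with the streaming MutateRows RPC
# shown just below; each Entry is applied atomically, but the batch as a whole
# is not. Reuses `client`, `bigtable`, and `data` from the earlier sketches.
entries = [
    bigtable.MutateRowsRequest.Entry(
        row_key=b"row-%03d" % i,
        mutations=[data.Mutation(set_cell=data.Mutation.SetCell(
            family_name="stats", column_qualifier=b"visits",
            timestamp_micros=-1, value=b"1"))],
    )
    for i in range(3)
]
stream = await client.mutate_rows(
    table_name="projects/my-project/instances/my-instance/tables/my-table",
    entries=entries,
)
async for response in stream:
    for entry in response.entries:  # per-entry status, possibly out of order
        print(entry.index, entry.status.code)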
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def check_and_mutate_row(self,
+ request: Optional[Union[bigtable.CheckAndMutateRowRequest, dict]] = None,
+ *,
+ table_name: Optional[str] = None,
+ row_key: Optional[bytes] = None,
+ predicate_filter: Optional[data.RowFilter] = None,
+ true_mutations: Optional[MutableSequence[data.Mutation]] = None,
+ false_mutations: Optional[MutableSequence[data.Mutation]] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> bigtable.CheckAndMutateRowResponse:
+ r"""Mutates a row atomically based on the output of a
+ predicate Reader filter.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_v2.types.CheckAndMutateRowRequest, dict]]):
+ The request object. Request message for
+ Bigtable.CheckAndMutateRow.
+ table_name (:class:`str`):
+ Required. The unique name of the table to which the
+ conditional mutation should be applied. Values are of
+ the form
+ ``projects/<project>/instances/<instance>/tables/<table>
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (:class:`bytes`): + Required. The key of the row to which + the conditional mutation should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + predicate_filter (:class:`google.cloud.bigtable_v2.types.RowFilter`): + The filter to be applied to the contents of the + specified row. Depending on whether or not any results + are yielded, either ``true_mutations`` or + ``false_mutations`` will be executed. If unset, checks + that the row contains any values at all. + + This corresponds to the ``predicate_filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + true_mutations (:class:`MutableSequence[google.cloud.bigtable_v2.types.Mutation]`): + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``false_mutations`` is empty, and at most 100000. + + This corresponds to the ``true_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + false_mutations (:class:`MutableSequence[google.cloud.bigtable_v2.types.Mutation]`): + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``true_mutations`` is empty, and at most 100000. + + This corresponds to the ``false_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: + Response message for + Bigtable.CheckAndMutateRow. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, predicate_filter, true_mutations, false_mutations, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable.CheckAndMutateRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
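# Sketch (not part of the patch): conditional mutation via CheckAndMutateRow.
# true_mutations run only when the predicate filter yields at least one cell
# for the row; the filter and values are placeholders, `client` and `data` as above.
flag = data.Mutation(set_cell=data.Mutation.SetCell(
    family_name="stats", column_qualifier=b"active", timestamp_micros=-1, value=b"1"))
response = await client.check_and_mutate_row(
    table_name="projects/my-project/instances/my-instance/tables/my-table",
    row_key=b"user#1234",
    predicate_filter=data.RowFilter(column_qualifier_regex_filter=b"visits"),
    true_mutations=[flag],
)
print(response.predicate_matched)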
+ if table_name is not None:
+ request.table_name = table_name
+ if row_key is not None:
+ request.row_key = row_key
+ if predicate_filter is not None:
+ request.predicate_filter = predicate_filter
+ if app_profile_id is not None:
+ request.app_profile_id = app_profile_id
+ if true_mutations:
+ request.true_mutations.extend(true_mutations)
+ if false_mutations:
+ request.false_mutations.extend(false_mutations)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.check_and_mutate_row,
+ default_timeout=20.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("table_name", request.table_name),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def ping_and_warm(self,
+ request: Optional[Union[bigtable.PingAndWarmRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> bigtable.PingAndWarmResponse:
+ r"""Warm up associated instance metadata for this
+ connection. This call is not required but may be useful
+ for connection keep-alive.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]]):
+ The request object. Request message for client connection
+ keep-alive and warming.
+ name (:class:`str`):
+ Required. The unique name of the instance to check
+ permissions for as well as respond. Values are of the
+ form ``projects/<project>/instances/<instance>``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ app_profile_id (:class:`str`):
+ This value specifies routing for
+ replication. If not specified, the
+ "default" application profile will be
+ used.
+
+ This corresponds to the ``app_profile_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.bigtable_v2.types.PingAndWarmResponse:
+ Response message for
+ Bigtable.PingAndWarm connection
+ keepalive and warming.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, app_profile_id])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable.PingAndWarmRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+ if app_profile_id is not None:
+ request.app_profile_id = app_profile_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
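# Sketch (not part of the patch): optional keep-alive warm-up against the
# instance, as described above; the instance name is a placeholder.
await client.ping_and_warm(
    name="projects/my-project/instances/my-instance",
)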
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.ping_and_warm,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def read_modify_write_row(self,
+ request: Optional[Union[bigtable.ReadModifyWriteRowRequest, dict]] = None,
+ *,
+ table_name: Optional[str] = None,
+ row_key: Optional[bytes] = None,
+ rules: Optional[MutableSequence[data.ReadModifyWriteRule]] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> bigtable.ReadModifyWriteRowResponse:
+ r"""Modifies a row atomically on the server. The method
+ reads the latest existing timestamp and value from the
+ specified columns and writes a new entry based on
+ pre-defined read/modify/write rules. The new value for
+ the timestamp is the greater of the existing timestamp
+ or the current server time. The method returns the new
+ contents of all modified cells.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest, dict]]):
+ The request object. Request message for
+ Bigtable.ReadModifyWriteRow.
+ table_name (:class:`str`):
+ Required. The unique name of the table to which the
+ read/modify/write rules should be applied. Values are of
+ the form
+ ``projects/<project>/instances/<instance>/tables/<table>
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (:class:`bytes`): + Required. The key of the row to which + the read/modify/write rules should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + rules (:class:`MutableSequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]`): + Required. Rules specifying how the + specified row's contents are to be + transformed into writes. Entries are + applied in order, meaning that earlier + rules will affect the results of later + ones. + + This corresponds to the ``rules`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: + Response message for + Bigtable.ReadModifyWriteRow. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, rules, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable.ReadModifyWriteRowRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if row_key is not None: + request.row_key = row_key + if app_profile_id is not None: + request.app_profile_id = app_profile_id + if rules: + request.rules.extend(rules) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_modify_write_row, + default_timeout=20.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table_name", request.table_name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def generate_initial_change_stream_partitions(self, + request: Optional[Union[bigtable.GenerateInitialChangeStreamPartitionsRequest, dict]] = None, + *, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Awaitable[AsyncIterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]]: + r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. 
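# Sketch (not part of the patch): a server-side atomic increment through
# ReadModifyWriteRow, completed above; the rule below bumps an 8-byte
# big-endian counter cell. `client` and `data` as in the earlier sketches.
response = await client.read_modify_write_row(
    table_name="projects/my-project/instances/my-instance/tables/my-table",
    row_key=b"user#1234",
    rules=[data.ReadModifyWriteRule(
        family_name="stats", column_qualifier=b"visits", increment_amount=1)],
)
print(response.row)  # the new contents of all modified cells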
+ Returns the current list of partitions that make up the table's
+ change stream. The union of partitions will cover the entire
+ keyspace. Partitions can be read with ``ReadChangeStream``.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]):
+ The request object. NOTE: This API is intended to be used
+ by Apache Beam BigtableIO. Request
+ message for
+ Bigtable.GenerateInitialChangeStreamPartitions.
+ table_name (:class:`str`):
+ Required. The unique name of the table from which to get
+ change stream partitions. Values are of the form
+ ``projects/<project>/instances/<instance>/tables/<table>``.
+ Change streaming must be enabled on the table.
+
+ This corresponds to the ``table_name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ app_profile_id (:class:`str`):
+ This value specifies routing for
+ replication. If not specified, the
+ "default" application profile will be
+ used. Single cluster routing must be
+ configured on the profile.
+
+ This corresponds to the ``app_profile_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]:
+ NOTE: This API is intended to be used
+ by Apache Beam BigtableIO. Response
+ message for
+ Bigtable.GenerateInitialChangeStreamPartitions.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([table_name, app_profile_id])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if table_name is not None:
+ request.table_name = table_name
+ if app_profile_id is not None:
+ request.app_profile_id = app_profile_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.generate_initial_change_stream_partitions,
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("table_name", request.table_name),
+ )),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def read_change_stream(self,
+ request: Optional[Union[bigtable.ReadChangeStreamRequest, dict]] = None,
+ *,
+ table_name: Optional[str] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]:
+ r"""NOTE: This API is intended to be used by Apache Beam
+ BigtableIO. Reads changes from a table's change stream.
+ Changes will reflect both user-initiated mutations and
+ mutations that are caused by garbage collection.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]):
+ The request object. NOTE: This API is intended to be used
+ by Apache Beam BigtableIO. Request
+ message for Bigtable.ReadChangeStream.
+ table_name (:class:`str`):
+ Required. The unique name of the table from which to
+ read a change stream. Values are of the form
+ ``projects/<project>/instances/<instance>/tables/<table>
``. + Change streaming must be enabled on the table. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. Single cluster routing must be + configured on the profile. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable.ReadChangeStreamRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.read_change_stream, + default_timeout=43200.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table_name", request.table_name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def __aenter__(self) -> "BigtableAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "BigtableAsyncClient", +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/client.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/client.py new file mode 100644 index 000000000..0f576325a --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/client.py @@ -0,0 +1,1415 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from collections import OrderedDict
+import os
+import re
+from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Iterable, Sequence, Tuple, Type, Union, cast
+
+from google.cloud.bigtable_v2 import gapic_version as package_version
+
+from google.api_core import client_options as client_options_lib
+from google.api_core import exceptions as core_exceptions
+from google.api_core import gapic_v1
+from google.api_core import retry as retries
+from google.auth import credentials as ga_credentials # type: ignore
+from google.auth.transport import mtls # type: ignore
+from google.auth.transport.grpc import SslCredentials # type: ignore
+from google.auth.exceptions import MutualTLSChannelError # type: ignore
+from google.oauth2 import service_account # type: ignore
+
+try:
+ OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault]
+except AttributeError: # pragma: NO COVER
+ OptionalRetry = Union[retries.Retry, object] # type: ignore
+
+from google.cloud.bigtable_v2.types import bigtable
+from google.cloud.bigtable_v2.types import data
+from google.cloud.bigtable_v2.types import request_stats
+from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO
+from .transports.grpc import BigtableGrpcTransport
+from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport
+from .transports.rest import BigtableRestTransport
+
+
+class BigtableClientMeta(type):
+ """Metaclass for the Bigtable client.
+
+ This provides class-level methods for building and retrieving
+ support objects (e.g. transport) without polluting the client instance
+ objects.
+ """
+ _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]]
+ _transport_registry["grpc"] = BigtableGrpcTransport
+ _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport
+ _transport_registry["rest"] = BigtableRestTransport
+
+ def get_transport_class(cls,
+ label: Optional[str] = None,
+ ) -> Type[BigtableTransport]:
+ """Returns an appropriate transport class.
+
+ Args:
+ label: The name of the desired transport. If none is
+ provided, then the first transport in the registry is used.
+
+ Returns:
+ The transport class to use.
+ """
+ # If a specific transport is requested, return that one.
+ if label:
+ return cls._transport_registry[label]
+
+ # No transport is requested; return the default (that is, the first one
+ # in the dictionary).
+ return next(iter(cls._transport_registry.values()))
+
+
+class BigtableClient(metaclass=BigtableClientMeta):
+ """Service for reading from and writing to existing Bigtable
+ tables.
+ """
+
+ @staticmethod
+ def _get_default_mtls_endpoint(api_endpoint):
+ """Converts api endpoint to mTLS endpoint.
+
+ Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+ "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+ Args:
+ api_endpoint (Optional[str]): the api endpoint to convert.
+ Returns:
+ str: converted mTLS api endpoint.
+ """
+ if not api_endpoint:
+ return api_endpoint
+
+ mtls_endpoint_re = re.compile(
+ r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+ )
+
+ m = mtls_endpoint_re.match(api_endpoint)
+ name, mtls, sandbox, googledomain = m.groups()
+ if mtls or not googledomain:
+ return api_endpoint
+
+ if sandbox:
+ return api_endpoint.replace(
+ "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+ )
+
+ return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+ DEFAULT_ENDPOINT = "bigtable.googleapis.com"
+ DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
+ DEFAULT_ENDPOINT
+ )
+
+ @classmethod
+ def from_service_account_info(cls, info: dict, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ info.
+
+ Args:
+ info (dict): The service account private key info.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BigtableClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_info(info)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ @classmethod
+ def from_service_account_file(cls, filename: str, *args, **kwargs):
+ """Creates an instance of this client using the provided credentials
+ file.
+
+ Args:
+ filename (str): The path to the service account private key json
+ file.
+ args: Additional arguments to pass to the constructor.
+ kwargs: Additional arguments to pass to the constructor.
+
+ Returns:
+ BigtableClient: The constructed client.
+ """
+ credentials = service_account.Credentials.from_service_account_file(
+ filename)
+ kwargs["credentials"] = credentials
+ return cls(*args, **kwargs)
+
+ from_service_account_json = from_service_account_file
+
+ @property
+ def transport(self) -> BigtableTransport:
+ """Returns the transport used by the client instance.
+
+ Returns:
+ BigtableTransport: The transport used by the client
+ instance.
+ """
+ return self._transport
+
+ @staticmethod
+ def instance_path(project: str,instance: str,) -> str:
+ """Returns a fully-qualified instance string."""
+ return "projects/{project}/instances/{instance}".format(project=project, instance=instance, )
+
+ @staticmethod
+ def parse_instance_path(path: str) -> Dict[str,str]:
+ """Parses an instance path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def table_path(project: str,instance: str,table: str,) -> str:
+ """Returns a fully-qualified table string."""
+ return "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, )
+
+ @staticmethod
+ def parse_table_path(path: str) -> Dict[str,str]:
+ """Parses a table path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/tables/(?P<table>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_billing_account_path(billing_account: str, ) -> str:
+ """Returns a fully-qualified billing_account string."""
+ return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
+
+ @staticmethod
+ def parse_common_billing_account_path(path: str) -> Dict[str,str]:
+ """Parse a billing_account path into its component segments."""
+ m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_folder_path(folder: str, ) -> str:
+ """Returns a fully-qualified folder string."""
+ return "folders/{folder}".format(folder=folder, )
+
+ @staticmethod
+ def parse_common_folder_path(path: str) -> Dict[str,str]:
+ """Parse a folder path into its component segments."""
+ m = re.match(r"^folders/(?P<folder>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_organization_path(organization: str, ) -> str:
+ """Returns a fully-qualified organization string."""
+ return "organizations/{organization}".format(organization=organization, )
+
+ @staticmethod
+ def parse_common_organization_path(path: str) -> Dict[str,str]:
+ """Parse an organization path into its component segments."""
+ m = re.match(r"^organizations/(?P<organization>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_project_path(project: str, ) -> str:
+ """Returns a fully-qualified project string."""
+ return "projects/{project}".format(project=project, )
+
+ @staticmethod
+ def parse_common_project_path(path: str) -> Dict[str,str]:
+ """Parse a project path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @staticmethod
+ def common_location_path(project: str, location: str, ) -> str:
+ """Returns a fully-qualified location string."""
+ return "projects/{project}/locations/{location}".format(project=project, location=location, )
+
+ @staticmethod
+ def parse_common_location_path(path: str) -> Dict[str,str]:
+ """Parse a location path into its component segments."""
+ m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
+ return m.groupdict() if m else {}
+
+ @classmethod
+ def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
+ """Return the API endpoint and client cert source for mutual TLS.
+
+ The client cert source is determined in the following order:
+ (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+ client cert source is None.
+ (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+ default client cert source exists, use the default one; otherwise the client cert
+ source is None.
+
+ The API endpoint is determined in the following order:
+ (1) if `client_options.api_endpoint` is provided, use the provided one.
+ (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+ default mTLS endpoint; if the environment variable is "never", use the default API
+ endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+ use the default API endpoint.
+
+ More details can be found at https://google.aip.dev/auth/4114.
+
+ Args:
+ client_options (google.api_core.client_options.ClientOptions): Custom options for the
+ client. Only the `api_endpoint` and `client_cert_source` properties may be used
+ in this method.
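# Sketch (not part of the patch): the path helpers above build and parse the
# resource names used throughout the API; the values are placeholders.
name = BigtableClient.table_path("my-project", "my-instance", "my-table")
# name == "projects/my-project/instances/my-instance/tables/my-table"
assert BigtableClient.parse_table_path(name) == {
    "project": "my-project", "instance": "my-instance", "table": "my-table"}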
+ + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, BigtableTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the bigtable client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BigtableTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. 
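# Sketch (not part of the patch): endpoint/cert resolution follows the two
# environment variables documented above; with no overrides the regular
# endpoint is selected and no client certificate is used.
import os
os.environ.pop("GOOGLE_API_USE_CLIENT_CERTIFICATE", None)
os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "never"
endpoint, cert_source = BigtableClient.get_mtls_endpoint_and_cert_source()
assert endpoint == BigtableClient.DEFAULT_ENDPOINT  # "bigtable.googleapis.com"
assert cert_source is None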
+
+ Raises:
+ google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
+ creation failed for any reason.
+ """
+ if isinstance(client_options, dict):
+ client_options = client_options_lib.from_dict(client_options)
+ if client_options is None:
+ client_options = client_options_lib.ClientOptions()
+ client_options = cast(client_options_lib.ClientOptions, client_options)
+
+ api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options)
+
+ api_key_value = getattr(client_options, "api_key", None)
+ if api_key_value and credentials:
+ raise ValueError("client_options.api_key and credentials are mutually exclusive")
+
+ # Save or instantiate the transport.
+ # Ordinarily, we provide the transport, but allowing a custom transport
+ # instance provides an extensibility point for unusual situations.
+ if isinstance(transport, BigtableTransport):
+ # transport is a BigtableTransport instance.
+ if credentials or client_options.credentials_file or api_key_value:
+ raise ValueError("When providing a transport instance, "
+ "provide its credentials directly.")
+ if client_options.scopes:
+ raise ValueError(
+ "When providing a transport instance, provide its scopes "
+ "directly."
+ )
+ self._transport = transport
+ else:
+ import google.auth._default # type: ignore
+
+ if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"):
+ credentials = google.auth._default.get_api_key_credentials(api_key_value)
+
+ Transport = type(self).get_transport_class(transport)
+ self._transport = Transport(
+ credentials=credentials,
+ credentials_file=client_options.credentials_file,
+ host=api_endpoint,
+ scopes=client_options.scopes,
+ client_cert_source_for_mtls=client_cert_source_func,
+ quota_project_id=client_options.quota_project_id,
+ client_info=client_info,
+ always_use_jwt_access=True,
+ api_audience=client_options.api_audience,
+ )
+
+ def read_rows(self,
+ request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None,
+ *,
+ table_name: Optional[str] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> Iterable[bigtable.ReadRowsResponse]:
+ r"""Streams back the contents of all requested rows in
+ key order, optionally applying the same Reader filter to
+ each. Depending on their size, rows and cells may be
+ broken up across multiple responses, but atomicity of
+ each row will still be preserved. See the
+ ReadRowsResponse documentation for details.
+
+ Args:
+ request (Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]):
+ The request object. Request message for
+ Bigtable.ReadRows.
+ table_name (str):
+ Required. The unique name of the table from which to
+ read. Values are of the form
+ ``projects/<project>/instances/<instance>/tables/<table>``.
+
+ This corresponds to the ``table_name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ app_profile_id (str):
+ This value specifies routing for
+ replication. If not specified, the
+ "default" application profile will be
+ used.
+
+ This corresponds to the ``app_profile_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ Iterable[google.cloud.bigtable_v2.types.ReadRowsResponse]:
+ Response message for
+ Bigtable.ReadRows.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([table_name, app_profile_id])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a bigtable.ReadRowsRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, bigtable.ReadRowsRequest):
+ request = bigtable.ReadRowsRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if table_name is not None:
+ request.table_name = table_name
+ if app_profile_id is not None:
+ request.app_profile_id = app_profile_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.read_rows]
+
+ header_params = {}
+
+ routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
+ regex_match = routing_param_regex.match(request.table_name)
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
+
+ if request.app_profile_id:
+ header_params["app_profile_id"] = request.app_profile_id
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def sample_row_keys(self,
+ request: Optional[Union[bigtable.SampleRowKeysRequest, dict]] = None,
+ *,
+ table_name: Optional[str] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> Iterable[bigtable.SampleRowKeysResponse]:
+ r"""Returns a sample of row keys in the table. The
+ returned row keys will delimit contiguous sections of
+ the table of approximately equal size, which can be used
+ to break up the data for distributed tasks like
+ mapreduces.
+
+ Args:
+ request (Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]):
+ The request object. Request message for
+ Bigtable.SampleRowKeys.
+ table_name (str):
+ Required. The unique name of the table from which to
+ sample row keys. Values are of the form
+ ``projects/<project>/instances/<instance>/tables/<table>``.
+
+ This corresponds to the ``table_name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ app_profile_id (str):
+ This value specifies routing for
+ replication. If not specified, the
+ "default" application profile will be
+ used.
+
+ This corresponds to the ``app_profile_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ Iterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]:
+ Response message for
+ Bigtable.SampleRowKeys.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([table_name, app_profile_id])
+ if request is not None and has_flattened_params:
+ raise ValueError('If the `request` argument is set, then none of '
+ 'the individual field arguments should be set.')
+
+ # Minor optimization to avoid making a copy if the user passes
+ # in a bigtable.SampleRowKeysRequest.
+ # There's no risk of modifying the input as we've already verified
+ # there are no flattened fields.
+ if not isinstance(request, bigtable.SampleRowKeysRequest):
+ request = bigtable.SampleRowKeysRequest(request)
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if table_name is not None:
+ request.table_name = table_name
+ if app_profile_id is not None:
+ request.app_profile_id = app_profile_id
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = self._transport._wrapped_methods[self._transport.sample_row_keys]
+
+ header_params = {}
+
+ routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
+ regex_match = routing_param_regex.match(request.table_name)
+ if regex_match and regex_match.group("table_name"):
+ header_params["table_name"] = regex_match.group("table_name")
+
+ if request.app_profile_id:
+ header_params["app_profile_id"] = request.app_profile_id
+
+ if header_params:
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata(header_params),
+ )
+
+ # Send the request.
+ response = rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ def mutate_row(self,
+ request: Optional[Union[bigtable.MutateRowRequest, dict]] = None,
+ *,
+ table_name: Optional[str] = None,
+ row_key: Optional[bytes] = None,
+ mutations: Optional[MutableSequence[data.Mutation]] = None,
+ app_profile_id: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> bigtable.MutateRowResponse:
+ r"""Mutates a row atomically. Cells already present in the row are
+ left unchanged unless explicitly changed by ``mutation``.
+
+ Args:
+ request (Union[google.cloud.bigtable_v2.types.MutateRowRequest, dict]):
+ The request object. Request message for
+ Bigtable.MutateRow.
+ table_name (str):
+ Required. The unique name of the table to which the
+ mutation should be applied. Values are of the form
+ ``projects/<project>/instances/<instance>/tables/<table>``.
+
+                This corresponds to the ``table_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            row_key (bytes):
+                Required. The key of the row to which
+                the mutation should be applied.
+
+                This corresponds to the ``row_key`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]):
+                Required. Changes to be atomically
+                applied to the specified row. Entries
+                are applied in order, meaning that
+                earlier mutations can be masked by later
+                ones. Must contain at least one entry
+                and at most 100000.
+
+                This corresponds to the ``mutations`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            app_profile_id (str):
+                This value specifies routing for
+                replication. If not specified, the
+                "default" application profile will be
+                used.
+
+                This corresponds to the ``app_profile_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.bigtable_v2.types.MutateRowResponse:
+                Response message for
+                Bigtable.MutateRow.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([table_name, row_key, mutations, app_profile_id])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a bigtable.MutateRowRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, bigtable.MutateRowRequest):
+            request = bigtable.MutateRowRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if table_name is not None:
+                request.table_name = table_name
+            if row_key is not None:
+                request.row_key = row_key
+            if mutations is not None:
+                request.mutations = mutations
+            if app_profile_id is not None:
+                request.app_profile_id = app_profile_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.mutate_row]
+
+        header_params = {}
+
+        routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
+        regex_match = routing_param_regex.match(request.table_name)
+        if regex_match and regex_match.group("table_name"):
+            header_params["table_name"] = regex_match.group("table_name")
+
+        if request.app_profile_id:
+            header_params["app_profile_id"] = request.app_profile_id
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
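+        # Editorial sketch (not part of the generated code): a minimal call
+        # using the flattened arguments, assuming a hypothetical table path
+        # and a SetCell mutation built from
+        # google.cloud.bigtable_v2.types.data:
+        #
+        #     cell = data.Mutation.SetCell(
+        #         family_name="cf1", column_qualifier=b"greeting",
+        #         timestamp_micros=-1, value=b"hello")
+        #     client.mutate_row(
+        #         table_name="projects/p/instances/i/tables/t",
+        #         row_key=b"row-1", mutations=[data.Mutation(set_cell=cell)])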
+ return response + + def mutate_rows(self, + request: Optional[Union[bigtable.MutateRowsRequest, dict]] = None, + *, + table_name: Optional[str] = None, + entries: Optional[MutableSequence[bigtable.MutateRowsRequest.Entry]] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.MutateRowsResponse]: + r"""Mutates multiple rows in a batch. Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Args: + request (Union[google.cloud.bigtable_v2.types.MutateRowsRequest, dict]): + The request object. Request message for + BigtableService.MutateRows. + table_name (str): + Required. The unique name of the + table to which the mutations should be + applied. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): + Required. The row keys and + corresponding mutations to be applied in + bulk. Each entry is applied as an atomic + mutation, but the entries may be applied + in arbitrary order (even between entries + for the same row). At least one entry + must be specified, and in total the + entries can contain at most 100000 + mutations. + + This corresponds to the ``entries`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: + Response message for + BigtableService.MutateRows. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, entries, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.MutateRowsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.MutateRowsRequest): + request = bigtable.MutateRowsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if entries is not None: + request.entries = entries + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
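+        # The header_params assembled below are serialized into the
+        # ``x-goog-request-params`` request metadata entry by
+        # gapic_v1.routing_header.to_grpc_metadata; for a table named
+        # ``projects/p/instances/i/tables/t`` the header value would look
+        # roughly like this (illustrative, percent-encoded):
+        #
+        #     table_name=projects%2Fp%2Finstances%2Fi%2Ftables%2Ft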
+        rpc = self._transport._wrapped_methods[self._transport.mutate_rows]
+
+        header_params = {}
+
+        routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
+        regex_match = routing_param_regex.match(request.table_name)
+        if regex_match and regex_match.group("table_name"):
+            header_params["table_name"] = regex_match.group("table_name")
+
+        if request.app_profile_id:
+            header_params["app_profile_id"] = request.app_profile_id
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def check_and_mutate_row(self,
+            request: Optional[Union[bigtable.CheckAndMutateRowRequest, dict]] = None,
+            *,
+            table_name: Optional[str] = None,
+            row_key: Optional[bytes] = None,
+            predicate_filter: Optional[data.RowFilter] = None,
+            true_mutations: Optional[MutableSequence[data.Mutation]] = None,
+            false_mutations: Optional[MutableSequence[data.Mutation]] = None,
+            app_profile_id: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> bigtable.CheckAndMutateRowResponse:
+        r"""Mutates a row atomically based on the output of a
+        predicate Reader filter.
+
+        Args:
+            request (Union[google.cloud.bigtable_v2.types.CheckAndMutateRowRequest, dict]):
+                The request object. Request message for
+                Bigtable.CheckAndMutateRow.
+            table_name (str):
+                Required. The unique name of the table to which the
+                conditional mutation should be applied. Values are of
+                the form
+                ``projects/<project>/instances/<instance>/tables/<table>
``. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + row_key (bytes): + Required. The key of the row to which + the conditional mutation should be + applied. + + This corresponds to the ``row_key`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + predicate_filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to be applied to the contents of the + specified row. Depending on whether or not any results + are yielded, either ``true_mutations`` or + ``false_mutations`` will be executed. If unset, checks + that the row contains any values at all. + + This corresponds to the ``predicate_filter`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + true_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``false_mutations`` is empty, and at most 100000. + + This corresponds to the ``true_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + false_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when + applied to ``row_key``. Entries are applied in order, + meaning that earlier mutations can be masked by later + ones. Must contain at least one entry if + ``true_mutations`` is empty, and at most 100000. + + This corresponds to the ``false_mutations`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: + Response message for + Bigtable.CheckAndMutateRow. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, row_key, predicate_filter, true_mutations, false_mutations, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.CheckAndMutateRowRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.CheckAndMutateRowRequest): + request = bigtable.CheckAndMutateRowRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+            if table_name is not None:
+                request.table_name = table_name
+            if row_key is not None:
+                request.row_key = row_key
+            if predicate_filter is not None:
+                request.predicate_filter = predicate_filter
+            if true_mutations is not None:
+                request.true_mutations = true_mutations
+            if false_mutations is not None:
+                request.false_mutations = false_mutations
+            if app_profile_id is not None:
+                request.app_profile_id = app_profile_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.check_and_mutate_row]
+
+        header_params = {}
+
+        routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
+        regex_match = routing_param_regex.match(request.table_name)
+        if regex_match and regex_match.group("table_name"):
+            header_params["table_name"] = regex_match.group("table_name")
+
+        if request.app_profile_id:
+            header_params["app_profile_id"] = request.app_profile_id
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def ping_and_warm(self,
+            request: Optional[Union[bigtable.PingAndWarmRequest, dict]] = None,
+            *,
+            name: Optional[str] = None,
+            app_profile_id: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> bigtable.PingAndWarmResponse:
+        r"""Warm up associated instance metadata for this
+        connection. This call is not required but may be useful
+        for connection keep-alive.
+
+        Args:
+            request (Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]):
+                The request object. Request message for client connection
+                keep-alive and warming.
+            name (str):
+                Required. The unique name of the instance to check
+                permissions for as well as respond. Values are of the
+                form ``projects/<project>/instances/<instance>``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            app_profile_id (str):
+                This value specifies routing for
+                replication. If not specified, the
+                "default" application profile will be
+                used.
+
+                This corresponds to the ``app_profile_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.bigtable_v2.types.PingAndWarmResponse:
+                Response message for
+                Bigtable.PingAndWarm connection
+                keepalive and warming.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([name, app_profile_id])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a bigtable.PingAndWarmRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
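+        # A proto-plus request constructor accepts either a mapping or
+        # another message of the same type, so (illustrative sketch) these
+        # two calls build the same request:
+        #
+        #     client.ping_and_warm(name="projects/p/instances/i")
+        #     client.ping_and_warm(
+        #         request=bigtable.PingAndWarmRequest(
+        #             {"name": "projects/p/instances/i"}))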
+        if not isinstance(request, bigtable.PingAndWarmRequest):
+            request = bigtable.PingAndWarmRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if name is not None:
+                request.name = name
+            if app_profile_id is not None:
+                request.app_profile_id = app_profile_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.ping_and_warm]
+
+        header_params = {}
+
+        routing_param_regex = re.compile('^(?P<name>projects/[^/]+/instances/[^/]+)$')
+        regex_match = routing_param_regex.match(request.name)
+        if regex_match and regex_match.group("name"):
+            header_params["name"] = regex_match.group("name")
+
+        if request.app_profile_id:
+            header_params["app_profile_id"] = request.app_profile_id
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def read_modify_write_row(self,
+            request: Optional[Union[bigtable.ReadModifyWriteRowRequest, dict]] = None,
+            *,
+            table_name: Optional[str] = None,
+            row_key: Optional[bytes] = None,
+            rules: Optional[MutableSequence[data.ReadModifyWriteRule]] = None,
+            app_profile_id: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> bigtable.ReadModifyWriteRowResponse:
+        r"""Modifies a row atomically on the server. The method
+        reads the latest existing timestamp and value from the
+        specified columns and writes a new entry based on
+        pre-defined read/modify/write rules. The new value for
+        the timestamp is the greater of the existing timestamp
+        or the current server time. The method returns the new
+        contents of all modified cells.
+
+        Args:
+            request (Union[google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest, dict]):
+                The request object. Request message for
+                Bigtable.ReadModifyWriteRow.
+            table_name (str):
+                Required. The unique name of the table to which the
+                read/modify/write rules should be applied. Values are of
+                the form
+                ``projects/<project>/instances/<instance>/tables/<table>``.
+
+                This corresponds to the ``table_name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            row_key (bytes):
+                Required. The key of the row to which
+                the read/modify/write rules should be
+                applied.
+
+                This corresponds to the ``row_key`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            rules (MutableSequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]):
+                Required. Rules specifying how the
+                specified row's contents are to be
+                transformed into writes. Entries are
+                applied in order, meaning that earlier
+                rules will affect the results of later
+                ones.
+
+                This corresponds to the ``rules`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            app_profile_id (str):
+                This value specifies routing for
+                replication. If not specified, the
+                "default" application profile will be
+                used.
+
+                This corresponds to the ``app_profile_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse:
+                Response message for
+                Bigtable.ReadModifyWriteRow.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([table_name, row_key, rules, app_profile_id])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a bigtable.ReadModifyWriteRowRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, bigtable.ReadModifyWriteRowRequest):
+            request = bigtable.ReadModifyWriteRowRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if table_name is not None:
+                request.table_name = table_name
+            if row_key is not None:
+                request.row_key = row_key
+            if rules is not None:
+                request.rules = rules
+            if app_profile_id is not None:
+                request.app_profile_id = app_profile_id
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.read_modify_write_row]
+
+        header_params = {}
+
+        routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
+        regex_match = routing_param_regex.match(request.table_name)
+        if regex_match and regex_match.group("table_name"):
+            header_params["table_name"] = regex_match.group("table_name")
+
+        if request.app_profile_id:
+            header_params["app_profile_id"] = request.app_profile_id
+
+        if header_params:
+            metadata = tuple(metadata) + (
+                gapic_v1.routing_header.to_grpc_metadata(header_params),
+            )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
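+        # Editorial sketch (not part of the generated code): incrementing a
+        # counter cell via a hypothetical rule, assuming the existing cell
+        # value is a 64-bit big-endian integer as ``increment_amount``
+        # requires:
+        #
+        #     rule = data.ReadModifyWriteRule(
+        #         family_name="cf1", column_qualifier=b"counter",
+        #         increment_amount=1)
+        #     client.read_modify_write_row(
+        #         table_name="projects/p/instances/i/tables/t",
+        #         row_key=b"row-1", rules=[rule])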
+        return response
+
+    def generate_initial_change_stream_partitions(self,
+            request: Optional[Union[bigtable.GenerateInitialChangeStreamPartitionsRequest, dict]] = None,
+            *,
+            table_name: Optional[str] = None,
+            app_profile_id: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> Iterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]:
+        r"""NOTE: This API is intended to be used by Apache Beam BigtableIO.
+        Returns the current list of partitions that make up the table's
+        change stream. The union of partitions will cover the entire
+        keyspace. Partitions can be read with ``ReadChangeStream``.
+
+        Args:
+            request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]):
+                The request object. NOTE: This API is intended to be used
+                by Apache Beam BigtableIO. Request
+                message for
+                Bigtable.GenerateInitialChangeStreamPartitions.
+            table_name (str):
+                Required. The unique name of the table from which to get
+                change stream partitions. Values are of the form
+                ``projects/<project>/instances/<instance>/tables/<table>
``. + Change streaming must be enabled on the table. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. Single cluster routing must be + configured on the profile. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for + Bigtable.GenerateInitialChangeStreamPartitions. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.GenerateInitialChangeStreamPartitionsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.GenerateInitialChangeStreamPartitionsRequest): + request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.generate_initial_change_stream_partitions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table_name", request.table_name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def read_change_stream(self, + request: Optional[Union[bigtable.ReadChangeStreamRequest, dict]] = None, + *, + table_name: Optional[str] = None, + app_profile_id: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> Iterable[bigtable.ReadChangeStreamResponse]: + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. + + Args: + request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. + table_name (str): + Required. 
The unique name of the table from which to
+                read a change stream. Values are of the form
+                ``projects/<project>/instances/<instance>/tables/<table>
``. + Change streaming must be enabled on the table. + + This corresponds to the ``table_name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. Single cluster routing must be + configured on the profile. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table_name, app_profile_id]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable.ReadChangeStreamRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable.ReadChangeStreamRequest): + request = bigtable.ReadChangeStreamRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table_name is not None: + request.table_name = table_name + if app_profile_id is not None: + request.app_profile_id = app_profile_id + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.read_change_stream] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table_name", request.table_name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "BigtableClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! 
+ """ + self.transport.close() + + + + + + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "BigtableClient", +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py new file mode 100644 index 000000000..49f168289 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import BigtableTransport +from .grpc import BigtableGrpcTransport +from .grpc_asyncio import BigtableGrpcAsyncIOTransport +from .rest import BigtableRestTransport +from .rest import BigtableRestInterceptor + + +# Compile a registry of transports. +_transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] +_transport_registry['grpc'] = BigtableGrpcTransport +_transport_registry['grpc_asyncio'] = BigtableGrpcAsyncIOTransport +_transport_registry['rest'] = BigtableRestTransport + +__all__ = ( + 'BigtableTransport', + 'BigtableGrpcTransport', + 'BigtableGrpcAsyncIOTransport', + 'BigtableRestTransport', + 'BigtableRestInterceptor', +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/base.py new file mode 100644 index 000000000..091b2425f --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -0,0 +1,272 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.bigtable_v2 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.bigtable_v2.types import bigtable + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +class BigtableTransport(abc.ABC): + """Abstract transport class for Bigtable.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.data', + 'https://www.googleapis.com/auth/bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-bigtable.data', + 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + DEFAULT_HOST: str = 'bigtable.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + # Don't apply audience if the credentials file passed from user. 
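+            # Resolution order above: explicit ``credentials`` win, then
+            # ``credentials_file``, then Application Default Credentials.
+            # (Illustrative) the file branch is equivalent to calling:
+            #
+            #     google.auth.load_credentials_from_file(
+            #         credentials_file, scopes=scopes,
+            #         default_scopes=self.AUTH_SCOPES,
+            #         quota_project_id=quota_project_id)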
+ if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience(api_audience if api_audience else host) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. + self._wrapped_methods = { + self.read_rows: gapic_v1.method.wrap_method( + self.read_rows, + default_timeout=43200.0, + client_info=client_info, + ), + self.sample_row_keys: gapic_v1.method.wrap_method( + self.sample_row_keys, + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_row: gapic_v1.method.wrap_method( + self.mutate_row, + default_retry=retries.Retry( +initial=0.01,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.mutate_rows: gapic_v1.method.wrap_method( + self.mutate_rows, + default_timeout=600.0, + client_info=client_info, + ), + self.check_and_mutate_row: gapic_v1.method.wrap_method( + self.check_and_mutate_row, + default_timeout=20.0, + client_info=client_info, + ), + self.ping_and_warm: gapic_v1.method.wrap_method( + self.ping_and_warm, + default_timeout=None, + client_info=client_info, + ), + self.read_modify_write_row: gapic_v1.method.wrap_method( + self.read_modify_write_row, + default_timeout=20.0, + client_info=client_info, + ), + self.generate_initial_change_stream_partitions: gapic_v1.method.wrap_method( + self.generate_initial_change_stream_partitions, + default_timeout=60.0, + client_info=client_info, + ), + self.read_change_stream: gapic_v1.method.wrap_method( + self.read_change_stream, + default_timeout=43200.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
+ """ + raise NotImplementedError() + + @property + def read_rows(self) -> Callable[ + [bigtable.ReadRowsRequest], + Union[ + bigtable.ReadRowsResponse, + Awaitable[bigtable.ReadRowsResponse] + ]]: + raise NotImplementedError() + + @property + def sample_row_keys(self) -> Callable[ + [bigtable.SampleRowKeysRequest], + Union[ + bigtable.SampleRowKeysResponse, + Awaitable[bigtable.SampleRowKeysResponse] + ]]: + raise NotImplementedError() + + @property + def mutate_row(self) -> Callable[ + [bigtable.MutateRowRequest], + Union[ + bigtable.MutateRowResponse, + Awaitable[bigtable.MutateRowResponse] + ]]: + raise NotImplementedError() + + @property + def mutate_rows(self) -> Callable[ + [bigtable.MutateRowsRequest], + Union[ + bigtable.MutateRowsResponse, + Awaitable[bigtable.MutateRowsResponse] + ]]: + raise NotImplementedError() + + @property + def check_and_mutate_row(self) -> Callable[ + [bigtable.CheckAndMutateRowRequest], + Union[ + bigtable.CheckAndMutateRowResponse, + Awaitable[bigtable.CheckAndMutateRowResponse] + ]]: + raise NotImplementedError() + + @property + def ping_and_warm(self) -> Callable[ + [bigtable.PingAndWarmRequest], + Union[ + bigtable.PingAndWarmResponse, + Awaitable[bigtable.PingAndWarmResponse] + ]]: + raise NotImplementedError() + + @property + def read_modify_write_row(self) -> Callable[ + [bigtable.ReadModifyWriteRowRequest], + Union[ + bigtable.ReadModifyWriteRowResponse, + Awaitable[bigtable.ReadModifyWriteRowResponse] + ]]: + raise NotImplementedError() + + @property + def generate_initial_change_stream_partitions(self) -> Callable[ + [bigtable.GenerateInitialChangeStreamPartitionsRequest], + Union[ + bigtable.GenerateInitialChangeStreamPartitionsResponse, + Awaitable[bigtable.GenerateInitialChangeStreamPartitionsResponse] + ]]: + raise NotImplementedError() + + @property + def read_change_stream(self) -> Callable[ + [bigtable.ReadChangeStreamRequest], + Union[ + bigtable.ReadChangeStreamResponse, + Awaitable[bigtable.ReadChangeStreamResponse] + ]]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'BigtableTransport', +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py new file mode 100644 index 000000000..3ccc82149 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -0,0 +1,501 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.bigtable_v2.types import bigtable +from .base import BigtableTransport, DEFAULT_CLIENT_INFO + + +class BigtableGrpcTransport(BigtableTransport): + """gRPC backend transport for Bigtable. + + Service for reading from and writing to existing Bigtable + tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'bigtable.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. 
+ client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'bigtable.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. 
+ This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def read_rows(self) -> Callable[ + [bigtable.ReadRowsRequest], + bigtable.ReadRowsResponse]: + r"""Return a callable for the read rows method over gRPC. + + Streams back the contents of all requested rows in + key order, optionally applying the same Reader filter to + each. Depending on their size, rows and cells may be + broken up across multiple responses, but atomicity of + each row will still be preserved. See the + ReadRowsResponse documentation for details. + + Returns: + Callable[[~.ReadRowsRequest], + ~.ReadRowsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'read_rows' not in self._stubs: + self._stubs['read_rows'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=bigtable.ReadRowsRequest.serialize, + response_deserializer=bigtable.ReadRowsResponse.deserialize, + ) + return self._stubs['read_rows'] + + @property + def sample_row_keys(self) -> Callable[ + [bigtable.SampleRowKeysRequest], + bigtable.SampleRowKeysResponse]: + r"""Return a callable for the sample row keys method over gRPC. + + Returns a sample of row keys in the table. The + returned row keys will delimit contiguous sections of + the table of approximately equal size, which can be used + to break up the data for distributed tasks like + mapreduces. + + Returns: + Callable[[~.SampleRowKeysRequest], + ~.SampleRowKeysResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'sample_row_keys' not in self._stubs: + self._stubs['sample_row_keys'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=bigtable.SampleRowKeysRequest.serialize, + response_deserializer=bigtable.SampleRowKeysResponse.deserialize, + ) + return self._stubs['sample_row_keys'] + + @property + def mutate_row(self) -> Callable[ + [bigtable.MutateRowRequest], + bigtable.MutateRowResponse]: + r"""Return a callable for the mutate row method over gRPC. + + Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. 
+ + Returns: + Callable[[~.MutateRowRequest], + ~.MutateRowResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'mutate_row' not in self._stubs: + self._stubs['mutate_row'] = self.grpc_channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=bigtable.MutateRowRequest.serialize, + response_deserializer=bigtable.MutateRowResponse.deserialize, + ) + return self._stubs['mutate_row'] + + @property + def mutate_rows(self) -> Callable[ + [bigtable.MutateRowsRequest], + bigtable.MutateRowsResponse]: + r"""Return a callable for the mutate rows method over gRPC. + + Mutates multiple rows in a batch. Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Returns: + Callable[[~.MutateRowsRequest], + ~.MutateRowsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'mutate_rows' not in self._stubs: + self._stubs['mutate_rows'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=bigtable.MutateRowsRequest.serialize, + response_deserializer=bigtable.MutateRowsResponse.deserialize, + ) + return self._stubs['mutate_rows'] + + @property + def check_and_mutate_row(self) -> Callable[ + [bigtable.CheckAndMutateRowRequest], + bigtable.CheckAndMutateRowResponse]: + r"""Return a callable for the check and mutate row method over gRPC. + + Mutates a row atomically based on the output of a + predicate Reader filter. + + Returns: + Callable[[~.CheckAndMutateRowRequest], + ~.CheckAndMutateRowResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'check_and_mutate_row' not in self._stubs: + self._stubs['check_and_mutate_row'] = self.grpc_channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=bigtable.CheckAndMutateRowRequest.serialize, + response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, + ) + return self._stubs['check_and_mutate_row'] + + @property + def ping_and_warm(self) -> Callable[ + [bigtable.PingAndWarmRequest], + bigtable.PingAndWarmResponse]: + r"""Return a callable for the ping and warm method over gRPC. + + Warm up associated instance metadata for this + connection. This call is not required but may be useful + for connection keep-alive. + + Returns: + Callable[[~.PingAndWarmRequest], + ~.PingAndWarmResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
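+        # Stubs are created lazily and cached in self._stubs, so each access
+        # to this property after the first reuses the same stub bound to this
+        # channel; e.g. (illustrative)
+        #
+        #     assert transport.ping_and_warm is transport.ping_and_warm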
+ if 'ping_and_warm' not in self._stubs: + self._stubs['ping_and_warm'] = self.grpc_channel.unary_unary( + '/google.bigtable.v2.Bigtable/PingAndWarm', + request_serializer=bigtable.PingAndWarmRequest.serialize, + response_deserializer=bigtable.PingAndWarmResponse.deserialize, + ) + return self._stubs['ping_and_warm'] + + @property + def read_modify_write_row(self) -> Callable[ + [bigtable.ReadModifyWriteRowRequest], + bigtable.ReadModifyWriteRowResponse]: + r"""Return a callable for the read modify write row method over gRPC. + + Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for + the timestamp is the greater of the existing timestamp + or the current server time. The method returns the new + contents of all modified cells. + + Returns: + Callable[[~.ReadModifyWriteRowRequest], + ~.ReadModifyWriteRowResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'read_modify_write_row' not in self._stubs: + self._stubs['read_modify_write_row'] = self.grpc_channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, + response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, + ) + return self._stubs['read_modify_write_row'] + + @property + def generate_initial_change_stream_partitions(self) -> Callable[ + [bigtable.GenerateInitialChangeStreamPartitionsRequest], + bigtable.GenerateInitialChangeStreamPartitionsResponse]: + r"""Return a callable for the generate initial change stream + partitions method over gRPC. + + NOTE: This API is intended to be used by Apache Beam BigtableIO. + Returns the current list of partitions that make up the table's + change stream. The union of partitions will cover the entire + keyspace. Partitions can be read with ``ReadChangeStream``. + + Returns: + Callable[[~.GenerateInitialChangeStreamPartitionsRequest], + ~.GenerateInitialChangeStreamPartitionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'generate_initial_change_stream_partitions' not in self._stubs: + self._stubs['generate_initial_change_stream_partitions'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', + request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, + response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, + ) + return self._stubs['generate_initial_change_stream_partitions'] + + @property + def read_change_stream(self) -> Callable[ + [bigtable.ReadChangeStreamRequest], + bigtable.ReadChangeStreamResponse]: + r"""Return a callable for the read change stream method over gRPC. + + NOTE: This API is intended to be used by Apache Beam + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. 
+ + Returns: + Callable[[~.ReadChangeStreamRequest], + ~.ReadChangeStreamResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'read_change_stream' not in self._stubs: + self._stubs['read_change_stream'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadChangeStream', + request_serializer=bigtable.ReadChangeStreamRequest.serialize, + response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, + ) + return self._stubs['read_change_stream'] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'BigtableGrpcTransport', +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py new file mode 100644 index 000000000..04a7f9124 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -0,0 +1,500 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.bigtable_v2.types import bigtable +from .base import BigtableTransport, DEFAULT_CLIENT_INFO +from .grpc import BigtableGrpcTransport + + +class BigtableGrpcAsyncIOTransport(BigtableTransport): + """gRPC AsyncIO backend transport for Bigtable. + + Service for reading from and writing to existing Bigtable + tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + + _grpc_channel: aio.Channel + _stubs: Dict[str, Callable] = {} + + @classmethod + def create_channel(cls, + host: str = 'bigtable.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> aio.Channel: + """Create and return a gRPC AsyncIO channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. 
If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'bigtable.googleapis.com',
+            credentials: Optional[ga_credentials.Credentials] = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: Optional[aio.Channel] = None,
+            api_mtls_endpoint: Optional[str] = None,
+            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            api_audience: Optional[str] = None,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+            client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                Deprecated. A callback to provide client SSL certificate bytes and
+                private key bytes, both in PEM format. It is ignored if
+                ``api_mtls_endpoint`` is None.
+            ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
+                for the grpc channel. It is ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
+                A callback to provide client certificate bytes and private key bytes,
+                both in PEM format. It is used to configure a mutual TLS channel. It is
+                ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
+ quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def read_rows(self) -> Callable[ + [bigtable.ReadRowsRequest], + Awaitable[bigtable.ReadRowsResponse]]: + r"""Return a callable for the read rows method over gRPC. + + Streams back the contents of all requested rows in + key order, optionally applying the same Reader filter to + each. Depending on their size, rows and cells may be + broken up across multiple responses, but atomicity of + each row will still be preserved. 
See the + ReadRowsResponse documentation for details. + + Returns: + Callable[[~.ReadRowsRequest], + Awaitable[~.ReadRowsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'read_rows' not in self._stubs: + self._stubs['read_rows'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=bigtable.ReadRowsRequest.serialize, + response_deserializer=bigtable.ReadRowsResponse.deserialize, + ) + return self._stubs['read_rows'] + + @property + def sample_row_keys(self) -> Callable[ + [bigtable.SampleRowKeysRequest], + Awaitable[bigtable.SampleRowKeysResponse]]: + r"""Return a callable for the sample row keys method over gRPC. + + Returns a sample of row keys in the table. The + returned row keys will delimit contiguous sections of + the table of approximately equal size, which can be used + to break up the data for distributed tasks like + mapreduces. + + Returns: + Callable[[~.SampleRowKeysRequest], + Awaitable[~.SampleRowKeysResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'sample_row_keys' not in self._stubs: + self._stubs['sample_row_keys'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=bigtable.SampleRowKeysRequest.serialize, + response_deserializer=bigtable.SampleRowKeysResponse.deserialize, + ) + return self._stubs['sample_row_keys'] + + @property + def mutate_row(self) -> Callable[ + [bigtable.MutateRowRequest], + Awaitable[bigtable.MutateRowResponse]]: + r"""Return a callable for the mutate row method over gRPC. + + Mutates a row atomically. Cells already present in the row are + left unchanged unless explicitly changed by ``mutation``. + + Returns: + Callable[[~.MutateRowRequest], + Awaitable[~.MutateRowResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'mutate_row' not in self._stubs: + self._stubs['mutate_row'] = self.grpc_channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=bigtable.MutateRowRequest.serialize, + response_deserializer=bigtable.MutateRowResponse.deserialize, + ) + return self._stubs['mutate_row'] + + @property + def mutate_rows(self) -> Callable[ + [bigtable.MutateRowsRequest], + Awaitable[bigtable.MutateRowsResponse]]: + r"""Return a callable for the mutate rows method over gRPC. + + Mutates multiple rows in a batch. Each individual row + is mutated atomically as in MutateRow, but the entire + batch is not executed atomically. + + Returns: + Callable[[~.MutateRowsRequest], + Awaitable[~.MutateRowsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
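+        # Usage sketch (illustrative): the returned callable opens a
+        # server-streaming call, which can be consumed as an async iterator
+        # from within a coroutine:
+        #
+        #   call = transport.mutate_rows(request)
+        #   async for response in call:
+        #       ...  # each response carries statuses for a subset of entries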
+ if 'mutate_rows' not in self._stubs: + self._stubs['mutate_rows'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=bigtable.MutateRowsRequest.serialize, + response_deserializer=bigtable.MutateRowsResponse.deserialize, + ) + return self._stubs['mutate_rows'] + + @property + def check_and_mutate_row(self) -> Callable[ + [bigtable.CheckAndMutateRowRequest], + Awaitable[bigtable.CheckAndMutateRowResponse]]: + r"""Return a callable for the check and mutate row method over gRPC. + + Mutates a row atomically based on the output of a + predicate Reader filter. + + Returns: + Callable[[~.CheckAndMutateRowRequest], + Awaitable[~.CheckAndMutateRowResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'check_and_mutate_row' not in self._stubs: + self._stubs['check_and_mutate_row'] = self.grpc_channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=bigtable.CheckAndMutateRowRequest.serialize, + response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, + ) + return self._stubs['check_and_mutate_row'] + + @property + def ping_and_warm(self) -> Callable[ + [bigtable.PingAndWarmRequest], + Awaitable[bigtable.PingAndWarmResponse]]: + r"""Return a callable for the ping and warm method over gRPC. + + Warm up associated instance metadata for this + connection. This call is not required but may be useful + for connection keep-alive. + + Returns: + Callable[[~.PingAndWarmRequest], + Awaitable[~.PingAndWarmResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'ping_and_warm' not in self._stubs: + self._stubs['ping_and_warm'] = self.grpc_channel.unary_unary( + '/google.bigtable.v2.Bigtable/PingAndWarm', + request_serializer=bigtable.PingAndWarmRequest.serialize, + response_deserializer=bigtable.PingAndWarmResponse.deserialize, + ) + return self._stubs['ping_and_warm'] + + @property + def read_modify_write_row(self) -> Callable[ + [bigtable.ReadModifyWriteRowRequest], + Awaitable[bigtable.ReadModifyWriteRowResponse]]: + r"""Return a callable for the read modify write row method over gRPC. + + Modifies a row atomically on the server. The method + reads the latest existing timestamp and value from the + specified columns and writes a new entry based on + pre-defined read/modify/write rules. The new value for + the timestamp is the greater of the existing timestamp + or the current server time. The method returns the new + contents of all modified cells. + + Returns: + Callable[[~.ReadModifyWriteRowRequest], + Awaitable[~.ReadModifyWriteRowResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
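+        # Usage sketch (illustrative): unlike the streaming methods, this is
+        # a unary call, so the result must be awaited:
+        #
+        #   response = await transport.read_modify_write_row(request)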
+ if 'read_modify_write_row' not in self._stubs: + self._stubs['read_modify_write_row'] = self.grpc_channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, + response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, + ) + return self._stubs['read_modify_write_row'] + + @property + def generate_initial_change_stream_partitions(self) -> Callable[ + [bigtable.GenerateInitialChangeStreamPartitionsRequest], + Awaitable[bigtable.GenerateInitialChangeStreamPartitionsResponse]]: + r"""Return a callable for the generate initial change stream + partitions method over gRPC. + + NOTE: This API is intended to be used by Apache Beam BigtableIO. + Returns the current list of partitions that make up the table's + change stream. The union of partitions will cover the entire + keyspace. Partitions can be read with ``ReadChangeStream``. + + Returns: + Callable[[~.GenerateInitialChangeStreamPartitionsRequest], + Awaitable[~.GenerateInitialChangeStreamPartitionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'generate_initial_change_stream_partitions' not in self._stubs: + self._stubs['generate_initial_change_stream_partitions'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', + request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, + response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, + ) + return self._stubs['generate_initial_change_stream_partitions'] + + @property + def read_change_stream(self) -> Callable[ + [bigtable.ReadChangeStreamRequest], + Awaitable[bigtable.ReadChangeStreamResponse]]: + r"""Return a callable for the read change stream method over gRPC. + + NOTE: This API is intended to be used by Apache Beam + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. + + Returns: + Callable[[~.ReadChangeStreamRequest], + Awaitable[~.ReadChangeStreamResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
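+        # Usage sketch (illustrative): like the other server-streaming
+        # methods, the returned aio call is an async iterator; it can also be
+        # cancelled with ``call.cancel()`` once the stream is no longer needed.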
+ if 'read_change_stream' not in self._stubs: + self._stubs['read_change_stream'] = self.grpc_channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadChangeStream', + request_serializer=bigtable.ReadChangeStreamRequest.serialize, + response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, + ) + return self._stubs['read_change_stream'] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ( + 'BigtableGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/rest.py new file mode 100644 index 000000000..8e5060b85 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -0,0 +1,1261 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.bigtable_v2.types import bigtable + +from .base import BigtableTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class BigtableRestInterceptor: + """Interceptor for Bigtable. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. + Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the BigtableRestTransport. + + .. 
code-block:: python + class MyCustomBigtableInterceptor(BigtableRestInterceptor): + def pre_check_and_mutate_row(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_check_and_mutate_row(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_generate_initial_change_stream_partitions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_generate_initial_change_stream_partitions(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_mutate_row(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_mutate_row(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_mutate_rows(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_mutate_rows(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_ping_and_warm(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_ping_and_warm(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_read_change_stream(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_read_change_stream(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_read_modify_write_row(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_read_modify_write_row(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_read_rows(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_read_rows(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_sample_row_keys(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_sample_row_keys(self, response): + logging.log(f"Received response: {response}") + return response + + transport = BigtableRestTransport(interceptor=MyCustomBigtableInterceptor()) + client = BigtableClient(transport=transport) + + + """ + def pre_check_and_mutate_row(self, request: bigtable.CheckAndMutateRowRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.CheckAndMutateRowRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for check_and_mutate_row + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_check_and_mutate_row(self, response: bigtable.CheckAndMutateRowResponse) -> bigtable.CheckAndMutateRowResponse: + """Post-rpc interceptor for check_and_mutate_row + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. 
+ """ + return response + def pre_generate_initial_change_stream_partitions(self, request: bigtable.GenerateInitialChangeStreamPartitionsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.GenerateInitialChangeStreamPartitionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for generate_initial_change_stream_partitions + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_generate_initial_change_stream_partitions(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for generate_initial_change_stream_partitions + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + def pre_mutate_row(self, request: bigtable.MutateRowRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.MutateRowRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for mutate_row + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_mutate_row(self, response: bigtable.MutateRowResponse) -> bigtable.MutateRowResponse: + """Post-rpc interceptor for mutate_row + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + def pre_mutate_rows(self, request: bigtable.MutateRowsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.MutateRowsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for mutate_rows + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_mutate_rows(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for mutate_rows + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + def pre_ping_and_warm(self, request: bigtable.PingAndWarmRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.PingAndWarmRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for ping_and_warm + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_ping_and_warm(self, response: bigtable.PingAndWarmResponse) -> bigtable.PingAndWarmResponse: + """Post-rpc interceptor for ping_and_warm + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + def pre_read_change_stream(self, request: bigtable.ReadChangeStreamRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.ReadChangeStreamRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for read_change_stream + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. 
+ """ + return request, metadata + + def post_read_change_stream(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for read_change_stream + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + def pre_read_modify_write_row(self, request: bigtable.ReadModifyWriteRowRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.ReadModifyWriteRowRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for read_modify_write_row + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_read_modify_write_row(self, response: bigtable.ReadModifyWriteRowResponse) -> bigtable.ReadModifyWriteRowResponse: + """Post-rpc interceptor for read_modify_write_row + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + def pre_read_rows(self, request: bigtable.ReadRowsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.ReadRowsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for read_rows + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_read_rows(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for read_rows + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + def pre_sample_row_keys(self, request: bigtable.SampleRowKeysRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.SampleRowKeysRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for sample_row_keys + + Override in a subclass to manipulate the request or metadata + before they are sent to the Bigtable server. + """ + return request, metadata + + def post_sample_row_keys(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: + """Post-rpc interceptor for sample_row_keys + + Override in a subclass to manipulate the response + after it is returned by the Bigtable server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class BigtableRestStub: + _session: AuthorizedSession + _host: str + _interceptor: BigtableRestInterceptor + + +class BigtableRestTransport(BigtableTransport): + """REST backend transport for Bigtable. + + Service for reading from and writing to existing Bigtable + tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+
+    It sends JSON representations of protocol buffers over HTTP/1.1
+
+    """
+
+    def __init__(self, *,
+            host: str = 'bigtable.googleapis.com',
+            credentials: Optional[ga_credentials.Credentials] = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            client_cert_source_for_mtls: Optional[Callable[[
+                ], Tuple[bytes, bytes]]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            url_scheme: str = 'https',
+            interceptor: Optional[BigtableRestInterceptor] = None,
+            api_audience: Optional[str] = None,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                 The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+                certificate to configure mutual TLS HTTP channel. It is ignored
+                if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                "https", but for testing or local servers,
+                "http" can be specified.
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
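+        # ``host`` may be supplied bare (e.g. 'bigtable.googleapis.com') or
+        # with an explicit scheme; bare hosts are prefixed with ``url_scheme``
+        # below.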
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(f"Unexpected hostname structure: {host}")  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST)
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or BigtableRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    class _CheckAndMutateRow(BigtableRestStub):
+        def __hash__(self):
+            return hash("CheckAndMutateRow")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+        }
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict}
+
+        def __call__(self,
+                request: bigtable.CheckAndMutateRowRequest, *,
+                retry: OptionalRetry=gapic_v1.method.DEFAULT,
+                timeout: Optional[float]=None,
+                metadata: Sequence[Tuple[str, str]]=(),
+                ) -> bigtable.CheckAndMutateRowResponse:
+            r"""Call the check and mutate row method over HTTP.
+
+            Args:
+                request (~.bigtable.CheckAndMutateRowRequest):
+                    The request object. Request message for
+                    Bigtable.CheckAndMutateRow.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, str]]): Strings which should be
+                    sent along with the request as metadata.
+
+            Returns:
+                ~.bigtable.CheckAndMutateRowResponse:
+                    Response message for
+                    Bigtable.CheckAndMutateRow.
+
+            """
+
+            http_options: List[Dict[str, str]] = [{
+                'method': 'post',
+                'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow',
+                'body': '*',
+            },
+            ]
+            request, metadata = self._interceptor.pre_check_and_mutate_row(request, metadata)
+            pb_request = bigtable.CheckAndMutateRowRequest.pb(request)
+            transcoded_request = path_template.transcode(http_options, pb_request)
+
+            # Jsonify the request body
+
+            body = json_format.MessageToJson(
+                transcoded_request['body'],
+                including_default_value_fields=False,
+                use_integers_for_enums=True
+            )
+            uri = transcoded_request['uri']
+            method = transcoded_request['method']
+
+            # Jsonify the query params
+            query_params = json.loads(json_format.MessageToJson(
+                transcoded_request['query_params'],
+                including_default_value_fields=False,
+                use_integers_for_enums=True,
+            ))
+            query_params.update(self._get_unset_required_fields(query_params))
+
+            query_params["$alt"] = "json;enum-encoding=int"
+
+            # Send the request
+            headers = dict(metadata)
+            headers['Content-Type'] = 'application/json'
+            response = getattr(self._session, method)(
+                "{host}{uri}".format(host=self._host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+                data=body,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
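+            # ``from_http_response`` maps the HTTP status code to the matching
+            # typed exception (for example, 404 -> NotFound,
+            # 403 -> PermissionDenied).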
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.CheckAndMutateRowResponse() + pb_resp = bigtable.CheckAndMutateRowResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_and_mutate_row(resp) + return resp + + class _GenerateInitialChangeStreamPartitions(BigtableRestStub): + def __hash__(self): + return hash("GenerateInitialChangeStreamPartitions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable.GenerateInitialChangeStreamPartitionsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> rest_streaming.ResponseIterator: + r"""Call the generate initial change + stream partitions method over HTTP. + + Args: + request (~.bigtable.GenerateInitialChangeStreamPartitionsRequest): + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for + Bigtable.GenerateInitialChangeStreamPartitions. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for + Bigtable.GenerateInitialChangeStreamPartitions. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_generate_initial_change_stream_partitions(request, metadata) + pb_request = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) + resp = self._interceptor.post_generate_initial_change_stream_partitions(resp) + return resp + + class _MutateRow(BigtableRestStub): + def __hash__(self): + return hash("MutateRow") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable.MutateRowRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable.MutateRowResponse: + r"""Call the mutate row method over HTTP. + + Args: + request (~.bigtable.MutateRowRequest): + The request object. Request message for + Bigtable.MutateRow. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.MutateRowResponse: + Response message for + Bigtable.MutateRow. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_mutate_row(request, metadata) + pb_request = bigtable.MutateRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.MutateRowResponse() + pb_resp = bigtable.MutateRowResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_mutate_row(resp) + return resp + + class _MutateRows(BigtableRestStub): + def __hash__(self): + return hash("MutateRows") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable.MutateRowsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> rest_streaming.ResponseIterator: + r"""Call the mutate rows method over HTTP. + + Args: + request (~.bigtable.MutateRowsRequest): + The request object. Request message for + BigtableService.MutateRows. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.MutateRowsResponse: + Response message for + BigtableService.MutateRows. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_mutate_rows(request, metadata) + pb_request = bigtable.MutateRowsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
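+            # For streaming methods the raw HTTP response is wrapped in a
+            # ResponseIterator, which parses the streamed JSON array into
+            # MutateRowsResponse messages as the caller iterates.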
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator(response, bigtable.MutateRowsResponse) + resp = self._interceptor.post_mutate_rows(resp) + return resp + + class _PingAndWarm(BigtableRestStub): + def __hash__(self): + return hash("PingAndWarm") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable.PingAndWarmRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable.PingAndWarmResponse: + r"""Call the ping and warm method over HTTP. + + Args: + request (~.bigtable.PingAndWarmRequest): + The request object. Request message for client connection + keep-alive and warming. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.PingAndWarmResponse: + Response message for + Bigtable.PingAndWarm connection + keepalive and warming. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{name=projects/*/instances/*}:ping', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_ping_and_warm(request, metadata) + pb_request = bigtable.PingAndWarmRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.PingAndWarmResponse() + pb_resp = bigtable.PingAndWarmResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_ping_and_warm(resp) + return resp + + class _ReadChangeStream(BigtableRestStub): + def __hash__(self): + return hash("ReadChangeStream") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable.ReadChangeStreamRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> rest_streaming.ResponseIterator: + r"""Call the read change stream method over HTTP. + + Args: + request (~.bigtable.ReadChangeStreamRequest): + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.ReadChangeStreamResponse: + NOTE: This API is intended to be used + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_read_change_stream(request, metadata) + pb_request = bigtable.ReadChangeStreamRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator(response, bigtable.ReadChangeStreamResponse) + resp = self._interceptor.post_read_change_stream(resp) + return resp + + class _ReadModifyWriteRow(BigtableRestStub): + def __hash__(self): + return hash("ReadModifyWriteRow") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable.ReadModifyWriteRowRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable.ReadModifyWriteRowResponse: + r"""Call the read modify write row method over HTTP. + + Args: + request (~.bigtable.ReadModifyWriteRowRequest): + The request object. Request message for + Bigtable.ReadModifyWriteRow. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.ReadModifyWriteRowResponse: + Response message for + Bigtable.ReadModifyWriteRow. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_read_modify_write_row(request, metadata) + pb_request = bigtable.ReadModifyWriteRowRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable.ReadModifyWriteRowResponse() + pb_resp = bigtable.ReadModifyWriteRowResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_read_modify_write_row(resp) + return resp + + class _ReadRows(BigtableRestStub): + def __hash__(self): + return hash("ReadRows") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable.ReadRowsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> rest_streaming.ResponseIterator: + r"""Call the read rows method over HTTP. + + Args: + request (~.bigtable.ReadRowsRequest): + The request object. Request message for + Bigtable.ReadRows. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.ReadRowsResponse: + Response message for + Bigtable.ReadRows. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:readRows', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_read_rows(request, metadata) + pb_request = bigtable.ReadRowsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator(response, bigtable.ReadRowsResponse) + resp = self._interceptor.post_read_rows(resp) + return resp + + class _SampleRowKeys(BigtableRestStub): + def __hash__(self): + return hash("SampleRowKeys") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable.SampleRowKeysRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> rest_streaming.ResponseIterator: + r"""Call the sample row keys method over HTTP. + + Args: + request (~.bigtable.SampleRowKeysRequest): + The request object. Request message for + Bigtable.SampleRowKeys. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable.SampleRowKeysResponse: + Response message for + Bigtable.SampleRowKeys. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys', + }, + ] + request, metadata = self._interceptor.pre_sample_row_keys(request, metadata) + pb_request = bigtable.SampleRowKeysRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = rest_streaming.ResponseIterator(response, bigtable.SampleRowKeysResponse) + resp = self._interceptor.post_sample_row_keys(resp) + return resp + + @property + def check_and_mutate_row(self) -> Callable[ + [bigtable.CheckAndMutateRowRequest], + bigtable.CheckAndMutateRowResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CheckAndMutateRow(self._session, self._host, self._interceptor) # type: ignore + + @property + def generate_initial_change_stream_partitions(self) -> Callable[ + [bigtable.GenerateInitialChangeStreamPartitionsRequest], + bigtable.GenerateInitialChangeStreamPartitionsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GenerateInitialChangeStreamPartitions(self._session, self._host, self._interceptor) # type: ignore + + @property + def mutate_row(self) -> Callable[ + [bigtable.MutateRowRequest], + bigtable.MutateRowResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._MutateRow(self._session, self._host, self._interceptor) # type: ignore + + @property + def mutate_rows(self) -> Callable[ + [bigtable.MutateRowsRequest], + bigtable.MutateRowsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._MutateRows(self._session, self._host, self._interceptor) # type: ignore + + @property + def ping_and_warm(self) -> Callable[ + [bigtable.PingAndWarmRequest], + bigtable.PingAndWarmResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._PingAndWarm(self._session, self._host, self._interceptor) # type: ignore + + @property + def read_change_stream(self) -> Callable[ + [bigtable.ReadChangeStreamRequest], + bigtable.ReadChangeStreamResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ReadChangeStream(self._session, self._host, self._interceptor) # type: ignore + + @property + def read_modify_write_row(self) -> Callable[ + [bigtable.ReadModifyWriteRowRequest], + bigtable.ReadModifyWriteRowResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ReadModifyWriteRow(self._session, self._host, self._interceptor) # type: ignore + + @property + def read_rows(self) -> Callable[ + [bigtable.ReadRowsRequest], + bigtable.ReadRowsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ReadRows(self._session, self._host, self._interceptor) # type: ignore + + @property + def sample_row_keys(self) -> Callable[ + [bigtable.SampleRowKeysRequest], + bigtable.SampleRowKeysResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._SampleRowKeys(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__=( + 'BigtableRestTransport', +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/__init__.py new file mode 100644 index 000000000..c618147af --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/__init__.py @@ -0,0 +1,108 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .bigtable import ( + CheckAndMutateRowRequest, + CheckAndMutateRowResponse, + GenerateInitialChangeStreamPartitionsRequest, + GenerateInitialChangeStreamPartitionsResponse, + MutateRowRequest, + MutateRowResponse, + MutateRowsRequest, + MutateRowsResponse, + PingAndWarmRequest, + PingAndWarmResponse, + RateLimitInfo, + ReadChangeStreamRequest, + ReadChangeStreamResponse, + ReadModifyWriteRowRequest, + ReadModifyWriteRowResponse, + ReadRowsRequest, + ReadRowsResponse, + SampleRowKeysRequest, + SampleRowKeysResponse, +) +from .data import ( + Cell, + Column, + ColumnRange, + Family, + Mutation, + ReadModifyWriteRule, + Row, + RowFilter, + RowRange, + RowSet, + StreamContinuationToken, + StreamContinuationTokens, + StreamPartition, + TimestampRange, + ValueRange, +) +from .feature_flags import ( + FeatureFlags, +) +from .request_stats import ( + FullReadStatsView, + ReadIterationStats, + RequestLatencyStats, + RequestStats, +) +from .response_params import ( + ResponseParams, +) + +__all__ = ( + 'CheckAndMutateRowRequest', + 'CheckAndMutateRowResponse', + 'GenerateInitialChangeStreamPartitionsRequest', + 'GenerateInitialChangeStreamPartitionsResponse', + 'MutateRowRequest', + 'MutateRowResponse', + 'MutateRowsRequest', + 'MutateRowsResponse', + 'PingAndWarmRequest', + 'PingAndWarmResponse', + 'RateLimitInfo', + 'ReadChangeStreamRequest', + 'ReadChangeStreamResponse', + 'ReadModifyWriteRowRequest', + 'ReadModifyWriteRowResponse', + 'ReadRowsRequest', + 'ReadRowsResponse', + 'SampleRowKeysRequest', + 'SampleRowKeysResponse', + 'Cell', + 'Column', + 'ColumnRange', + 'Family', + 'Mutation', + 'ReadModifyWriteRule', + 'Row', + 'RowFilter', + 'RowRange', + 'RowSet', + 'StreamContinuationToken', + 'StreamContinuationTokens', + 'StreamPartition', + 'TimestampRange', + 'ValueRange', + 'FeatureFlags', + 'FullReadStatsView', + 'ReadIterationStats', + 'RequestLatencyStats', + 'RequestStats', + 'ResponseParams', +) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/bigtable.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/bigtable.py new file mode 100644 index 000000000..d0807b46a --- /dev/null +++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/bigtable.py @@ -0,0 +1,1186 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.bigtable_v2.types import data +from google.cloud.bigtable_v2.types import request_stats as gb_request_stats +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.protobuf import wrappers_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.bigtable.v2', + manifest={ + 'ReadRowsRequest', + 'ReadRowsResponse', + 'SampleRowKeysRequest', + 'SampleRowKeysResponse', + 'MutateRowRequest', + 'MutateRowResponse', + 'MutateRowsRequest', + 'MutateRowsResponse', + 'RateLimitInfo', + 'CheckAndMutateRowRequest', + 'CheckAndMutateRowResponse', + 'PingAndWarmRequest', + 'PingAndWarmResponse', + 'ReadModifyWriteRowRequest', + 'ReadModifyWriteRowResponse', + 'GenerateInitialChangeStreamPartitionsRequest', + 'GenerateInitialChangeStreamPartitionsResponse', + 'ReadChangeStreamRequest', + 'ReadChangeStreamResponse', + }, +) + + +class ReadRowsRequest(proto.Message): + r"""Request message for Bigtable.ReadRows. + + Attributes: + table_name (str): + Required. The unique name of the table from which to read. + Values are of the form + ``projects//instances//tables/
+            ``.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
+        rows (google.cloud.bigtable_v2.types.RowSet):
+            The row keys and/or ranges to read
+            sequentially. If not specified, reads from all
+            rows.
+        filter (google.cloud.bigtable_v2.types.RowFilter):
+            The filter to apply to the contents of the
+            specified row(s). If unset, reads the entirety
+            of each row.
+        rows_limit (int):
+            The read will stop after committing to N
+            rows' worth of results. The default (zero) is to
+            return all results.
+        request_stats_view (google.cloud.bigtable_v2.types.ReadRowsRequest.RequestStatsView):
+            The view into RequestStats, as described
+            above.
+        reversed (bool):
+            Experimental API - Please note that this API is currently
+            experimental and can change in the future.
+
+            Return rows in lexicographical descending order of the row
+            keys. The row contents will not be affected by this flag.
+
+            Example result set:
+
+            ::
+
+                [
+                    {key: "k2", "f:col1": "v1", "f:col2": "v1"},
+                    {key: "k1", "f:col1": "v2", "f:col2": "v2"}
+                ]
+    """
+    class RequestStatsView(proto.Enum):
+        r"""The desired view into RequestStats that should be returned in
+        the response.
+        See also: RequestStats message.
+
+        Values:
+            REQUEST_STATS_VIEW_UNSPECIFIED (0):
+                The default / unset value. The API will
+                default to the NONE option below.
+            REQUEST_STATS_NONE (1):
+                Do not include any RequestStats in the
+                response. This will leave the RequestStats
+                embedded message unset in the response.
+            REQUEST_STATS_FULL (2):
+                Include the full set of available
+                RequestStats in the response, applicable to this
+                read.
+        """
+        REQUEST_STATS_VIEW_UNSPECIFIED = 0
+        REQUEST_STATS_NONE = 1
+        REQUEST_STATS_FULL = 2
+
+    table_name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    app_profile_id: str = proto.Field(
+        proto.STRING,
+        number=5,
+    )
+    rows: data.RowSet = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=data.RowSet,
+    )
+    filter: data.RowFilter = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message=data.RowFilter,
+    )
+    rows_limit: int = proto.Field(
+        proto.INT64,
+        number=4,
+    )
+    request_stats_view: RequestStatsView = proto.Field(
+        proto.ENUM,
+        number=6,
+        enum=RequestStatsView,
+    )
+    reversed: bool = proto.Field(
+        proto.BOOL,
+        number=7,
+    )
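+
+
+# The request message above is typically constructed directly; the helper
+# below is an editorial sketch (not generated code) showing a reversed,
+# limited scan over the key range [b'a', b'm'). The table path and helper
+# name are placeholders.
+def _example_read_rows_request() -> 'ReadRowsRequest':
+    return ReadRowsRequest(
+        table_name='projects/my-project/instances/my-instance/tables/my-table',
+        rows=data.RowSet(
+            row_ranges=[data.RowRange(start_key_closed=b'a', end_key_open=b'm')],
+        ),
+        rows_limit=10,
+        reversed=True,
+    )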
+
+
+class ReadRowsResponse(proto.Message):
+    r"""Response message for Bigtable.ReadRows.
+
+    Attributes:
+        chunks (MutableSequence[google.cloud.bigtable_v2.types.ReadRowsResponse.CellChunk]):
+            A collection of a row's contents as part of
+            the read request.
+        last_scanned_row_key (bytes):
+            Optionally the server might return the row
+            key of the last row it has scanned. The client
+            can use this to construct a more efficient retry
+            request if needed: any row keys or portions of
+            ranges less than this row key can be dropped
+            from the request. This is primarily useful for
+            cases where the server has read a lot of data
+            that was filtered out since the last committed
+            row key, allowing the client to skip that work
+            on a retry.
+        request_stats (google.cloud.bigtable_v2.types.RequestStats):
+            If requested, provide enhanced query performance statistics.
+            The semantics dictate:
+
+            -  request_stats is empty on every (streamed) response,
+               except
+            -  request_stats has non-empty information after all chunks
+               have been streamed, where the ReadRowsResponse message
+               only contains request_stats.
+            -  For example, if a read request would have returned an
+               empty response, instead a single ReadRowsResponse is
+               streamed with empty chunks and request_stats filled.
+
+            Visually, response messages will stream as follows:
+
+            ::
+
+                   ... -> {chunks: [...]} -> {chunks: [], request_stats: {...}}
+                  \______________________/  \________________________________/
+                      Primary response         Trailer of RequestStats info
+
+            Or if the read did not return any values:
+
+            ::
+
+                   {chunks: [], request_stats: {...}}
+                  \________________________________/
+                     Trailer of RequestStats info
+    """
+
+    class CellChunk(proto.Message):
+        r"""Specifies a piece of a row's contents returned as part of the
+        read response stream.
+
+        This message has `oneof`_ fields (mutually exclusive fields).
+        For each oneof, at most one member field can be set at the same time.
+        Setting any member of the oneof automatically clears all other
+        members.
+
+        .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+        Attributes:
+            row_key (bytes):
+                The row key for this chunk of data. If the
+                row key is empty, this CellChunk is a
+                continuation of the same row as the previous
+                CellChunk in the response stream, even if that
+                CellChunk was in a previous ReadRowsResponse
+                message.
+            family_name (google.protobuf.wrappers_pb2.StringValue):
+                The column family name for this chunk of data. If this
+                message is not present this CellChunk is a continuation of
+                the same column family as the previous CellChunk. The empty
+                string can occur as a column family name in a response so
+                clients must check explicitly for the presence of this
+                message, not just for ``family_name.value`` being non-empty.
+            qualifier (google.protobuf.wrappers_pb2.BytesValue):
+                The column qualifier for this chunk of data. If this message
+                is not present, this CellChunk is a continuation of the same
+                column as the previous CellChunk. Column qualifiers may be
+                empty so clients must check for the presence of this
+                message, not just for ``qualifier.value`` being non-empty.
+            timestamp_micros (int):
+                The cell's stored timestamp, which also uniquely identifies
+                it within its column. Values are always expressed in
+                microseconds, but individual tables may set a coarser
+                granularity to further restrict the allowed values. For
+                example, a table which specifies millisecond granularity
+                will only allow values of ``timestamp_micros`` which are
+                multiples of 1000. Timestamps are only set in the first
+                CellChunk per cell (for cells split into multiple chunks).
+            labels (MutableSequence[str]):
+                Labels applied to the cell by a
+                [RowFilter][google.bigtable.v2.RowFilter]. Labels are only
+                set on the first CellChunk per cell.
+            value (bytes):
+                The value stored in the cell. Cell values
+                can be split across multiple CellChunks. In
+                that case only the value field will be set in
+                CellChunks after the first: the timestamp and
+                labels will only be present in the first
+                CellChunk, even if the first CellChunk came in a
+                previous ReadRowsResponse.
+            value_size (int):
+                If this CellChunk is part of a chunked cell value and this
+                is not the final chunk of that cell, value_size will be set
+                to the total length of the cell value. The client can use
+                this size to pre-allocate memory to hold the full cell
+                value.
+            reset_row (bool):
+                Indicates that the client should drop all previous chunks
+                for ``row_key``, as it will be re-read from the beginning.
+
+                This field is a member of `oneof`_ ``row_status``.
+ commit_row (bool): + Indicates that the client can safely process all previous + chunks for ``row_key``, as its data has been fully read. + + This field is a member of `oneof`_ ``row_status``. + """ + + row_key: bytes = proto.Field( + proto.BYTES, + number=1, + ) + family_name: wrappers_pb2.StringValue = proto.Field( + proto.MESSAGE, + number=2, + message=wrappers_pb2.StringValue, + ) + qualifier: wrappers_pb2.BytesValue = proto.Field( + proto.MESSAGE, + number=3, + message=wrappers_pb2.BytesValue, + ) + timestamp_micros: int = proto.Field( + proto.INT64, + number=4, + ) + labels: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=5, + ) + value: bytes = proto.Field( + proto.BYTES, + number=6, + ) + value_size: int = proto.Field( + proto.INT32, + number=7, + ) + reset_row: bool = proto.Field( + proto.BOOL, + number=8, + oneof='row_status', + ) + commit_row: bool = proto.Field( + proto.BOOL, + number=9, + oneof='row_status', + ) + + chunks: MutableSequence[CellChunk] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=CellChunk, + ) + last_scanned_row_key: bytes = proto.Field( + proto.BYTES, + number=2, + ) + request_stats: gb_request_stats.RequestStats = proto.Field( + proto.MESSAGE, + number=3, + message=gb_request_stats.RequestStats, + ) + + +class SampleRowKeysRequest(proto.Message): + r"""Request message for Bigtable.SampleRowKeys. + + Attributes: + table_name (str): + Required. The unique name of the table from which to sample + row keys. Values are of the form + ``projects//instances//tables/
``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + """ + + table_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class SampleRowKeysResponse(proto.Message): + r"""Response message for Bigtable.SampleRowKeys. + + Attributes: + row_key (bytes): + Sorted streamed sequence of sample row keys + in the table. The table might have contents + before the first row key in the list and after + the last one, but a key containing the empty + string indicates "end of table" and will be the + last response given, if present. + Note that row keys in this list may not have + ever been written to or read from, and users + should therefore not make any assumptions about + the row key structure that are specific to their + use case. + offset_bytes (int): + Approximate total storage space used by all rows in the + table which precede ``row_key``. Buffering the contents of + all rows between two subsequent samples would require space + roughly equal to the difference in their ``offset_bytes`` + fields. + """ + + row_key: bytes = proto.Field( + proto.BYTES, + number=1, + ) + offset_bytes: int = proto.Field( + proto.INT64, + number=2, + ) + + +class MutateRowRequest(proto.Message): + r"""Request message for Bigtable.MutateRow. + + Attributes: + table_name (str): + Required. The unique name of the table to which the mutation + should be applied. Values are of the form + ``projects//instances//tables/
+            ``.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
+        row_key (bytes):
+            Required. The key of the row to which the
+            mutation should be applied.
+        mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]):
+            Required. Changes to be atomically applied to
+            the specified row. Entries are applied in order,
+            meaning that earlier mutations can be masked by
+            later ones. Must contain at least one entry and
+            at most 100000.
+    """
+
+    table_name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    app_profile_id: str = proto.Field(
+        proto.STRING,
+        number=4,
+    )
+    row_key: bytes = proto.Field(
+        proto.BYTES,
+        number=2,
+    )
+    mutations: MutableSequence[data.Mutation] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=3,
+        message=data.Mutation,
+    )
+
+
+class MutateRowResponse(proto.Message):
+    r"""Response message for Bigtable.MutateRow.
+    """
+
+
+class MutateRowsRequest(proto.Message):
+    r"""Request message for BigtableService.MutateRows.
+
+    Attributes:
+        table_name (str):
+            Required. The unique name of the table to
+            which the mutations should be applied.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used.
+        entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]):
+            Required. The row keys and corresponding
+            mutations to be applied in bulk. Each entry is
+            applied as an atomic mutation, but the entries
+            may be applied in arbitrary order (even between
+            entries for the same row). At least one entry
+            must be specified, and in total the entries can
+            contain at most 100000 mutations.
+    """
+
+    class Entry(proto.Message):
+        r"""A mutation for a given row.
+
+        Attributes:
+            row_key (bytes):
+                The key of the row to which the ``mutations`` should be
+                applied.
+            mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]):
+                Required. Changes to be atomically applied to
+                the specified row. Mutations are applied in
+                order, meaning that earlier mutations can be
+                masked by later ones. You must specify at least
+                one mutation.
+        """
+
+        row_key: bytes = proto.Field(
+            proto.BYTES,
+            number=1,
+        )
+        mutations: MutableSequence[data.Mutation] = proto.RepeatedField(
+            proto.MESSAGE,
+            number=2,
+            message=data.Mutation,
+        )
+
+    table_name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    app_profile_id: str = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    entries: MutableSequence[Entry] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message=Entry,
+    )
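+
+
+# Editorial sketch (not generated code): one way to assemble a batch of
+# single-cell writes, one atomic entry per row. The table path, family,
+# qualifier, values, and helper name are placeholders.
+def _example_mutate_rows_request() -> 'MutateRowsRequest':
+    set_cell = data.Mutation(
+        set_cell=data.Mutation.SetCell(
+            family_name='cf',
+            column_qualifier=b'greeting',
+            timestamp_micros=-1,  # -1 asks the server to assign the timestamp
+            value=b'hello',
+        ),
+    )
+    return MutateRowsRequest(
+        table_name='projects/my-project/instances/my-instance/tables/my-table',
+        entries=[
+            MutateRowsRequest.Entry(row_key=b'row-1', mutations=[set_cell]),
+            MutateRowsRequest.Entry(row_key=b'row-2', mutations=[set_cell]),
+        ],
+    )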
+
+
+class MutateRowsResponse(proto.Message):
+    r"""Response message for BigtableService.MutateRows.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]):
+            One or more results for Entries from the
+            batch request.
+        rate_limit_info (google.cloud.bigtable_v2.types.RateLimitInfo):
+            Information about how the client should limit
+            the rate (QPS). Primarily used by supported
+            official Cloud Bigtable clients. If unset, the
+            rate limit info is not provided by the server.
+
+            This field is a member of `oneof`_ ``_rate_limit_info``.
+    """
+
+    class Entry(proto.Message):
+        r"""The result of applying a passed mutation in the original
+        request.
+
+        Attributes:
+            index (int):
+                The index into the original request's ``entries`` list of
+                the Entry for which a result is being reported.
+            status (google.rpc.status_pb2.Status):
+                The result of the request Entry identified by ``index``.
+                Depending on how requests are batched during execution, it
+                is possible for one Entry to fail due to an error with
+                another Entry. In the event that this occurs, the same error
+                will be reported for both entries.
+        """
+
+        index: int = proto.Field(
+            proto.INT64,
+            number=1,
+        )
+        status: status_pb2.Status = proto.Field(
+            proto.MESSAGE,
+            number=2,
+            message=status_pb2.Status,
+        )
+
+    entries: MutableSequence[Entry] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message=Entry,
+    )
+    rate_limit_info: 'RateLimitInfo' = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        optional=True,
+        message='RateLimitInfo',
+    )
+
+
+class RateLimitInfo(proto.Message):
+    r"""Information about how the client should adjust the load to
+    Bigtable.
+
+    Attributes:
+        period (google.protobuf.duration_pb2.Duration):
+            Time that clients should wait before
+            adjusting the target rate again. If clients
+            adjust rate too frequently, the impact of the
+            previous adjustment may not have been taken into
+            account and may over-throttle or under-throttle.
+            If clients adjust rate too slowly, they will not
+            be responsive to load changes on server side,
+            and may over-throttle or under-throttle.
+        factor (float):
+            If it has been at least one ``period`` since the last load
+            adjustment, the client should multiply the current load by
+            this value to get the new target load. For example, if the
+            current load is 100 and ``factor`` is 0.8, the new target
+            load should be 80. After adjusting, the client should ignore
+            ``factor`` until another ``period`` has passed.
+
+            The client can measure its load using any unit that's
+            comparable over time. For example, QPS can be used as long
+            as each request involves a similar amount of work.
+    """
+
+    period: duration_pb2.Duration = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=duration_pb2.Duration,
+    )
+    factor: float = proto.Field(
+        proto.DOUBLE,
+        number=2,
+    )
+
+
+class CheckAndMutateRowRequest(proto.Message):
+    r"""Request message for Bigtable.CheckAndMutateRow.
+
+    Attributes:
+        table_name (str):
+            Required. The unique name of the table to which the
+            conditional mutation should be applied. Values are of the
+            form
+            ``projects//instances//tables/
``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + row_key (bytes): + Required. The key of the row to which the + conditional mutation should be applied. + predicate_filter (google.cloud.bigtable_v2.types.RowFilter): + The filter to be applied to the contents of the specified + row. Depending on whether or not any results are yielded, + either ``true_mutations`` or ``false_mutations`` will be + executed. If unset, checks that the row contains any values + at all. + true_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` yields at least one cell when applied + to ``row_key``. Entries are applied in order, meaning that + earlier mutations can be masked by later ones. Must contain + at least one entry if ``false_mutations`` is empty, and at + most 100000. + false_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): + Changes to be atomically applied to the specified row if + ``predicate_filter`` does not yield any cells when applied + to ``row_key``. Entries are applied in order, meaning that + earlier mutations can be masked by later ones. Must contain + at least one entry if ``true_mutations`` is empty, and at + most 100000. + """ + + table_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=7, + ) + row_key: bytes = proto.Field( + proto.BYTES, + number=2, + ) + predicate_filter: data.RowFilter = proto.Field( + proto.MESSAGE, + number=6, + message=data.RowFilter, + ) + true_mutations: MutableSequence[data.Mutation] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=data.Mutation, + ) + false_mutations: MutableSequence[data.Mutation] = proto.RepeatedField( + proto.MESSAGE, + number=5, + message=data.Mutation, + ) + + +class CheckAndMutateRowResponse(proto.Message): + r"""Response message for Bigtable.CheckAndMutateRow. + + Attributes: + predicate_matched (bool): + Whether or not the request's ``predicate_filter`` yielded + any results for the specified row. + """ + + predicate_matched: bool = proto.Field( + proto.BOOL, + number=1, + ) + + +class PingAndWarmRequest(proto.Message): + r"""Request message for client connection keep-alive and warming. + + Attributes: + name (str): + Required. The unique name of the instance to check + permissions for as well as respond. Values are of the form + ``projects//instances/``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class PingAndWarmResponse(proto.Message): + r"""Response message for Bigtable.PingAndWarm connection + keepalive and warming. + + """ + + +class ReadModifyWriteRowRequest(proto.Message): + r"""Request message for Bigtable.ReadModifyWriteRow. + + Attributes: + table_name (str): + Required. The unique name of the table to which the + read/modify/write rules should be applied. Values are of the + form + ``projects//instances//tables/
``. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. + row_key (bytes): + Required. The key of the row to which the + read/modify/write rules should be applied. + rules (MutableSequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): + Required. Rules specifying how the specified + row's contents are to be transformed into + writes. Entries are applied in order, meaning + that earlier rules will affect the results of + later ones. + """ + + table_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=4, + ) + row_key: bytes = proto.Field( + proto.BYTES, + number=2, + ) + rules: MutableSequence[data.ReadModifyWriteRule] = proto.RepeatedField( + proto.MESSAGE, + number=3, + message=data.ReadModifyWriteRule, + ) + + +class ReadModifyWriteRowResponse(proto.Message): + r"""Response message for Bigtable.ReadModifyWriteRow. + + Attributes: + row (google.cloud.bigtable_v2.types.Row): + A Row containing the new contents of all + cells modified by the request. + """ + + row: data.Row = proto.Field( + proto.MESSAGE, + number=1, + message=data.Row, + ) + + +class GenerateInitialChangeStreamPartitionsRequest(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Request message for + Bigtable.GenerateInitialChangeStreamPartitions. + + Attributes: + table_name (str): + Required. The unique name of the table from which to get + change stream partitions. Values are of the form + ``projects//instances//tables/
``. + Change streaming must be enabled on the table. + app_profile_id (str): + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. Single cluster routing + must be configured on the profile. + """ + + table_name: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GenerateInitialChangeStreamPartitionsResponse(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Response message for + Bigtable.GenerateInitialChangeStreamPartitions. + + Attributes: + partition (google.cloud.bigtable_v2.types.StreamPartition): + A partition of the change stream. + """ + + partition: data.StreamPartition = proto.Field( + proto.MESSAGE, + number=1, + message=data.StreamPartition, + ) + + +class ReadChangeStreamRequest(proto.Message): + r"""NOTE: This API is intended to be used by Apache Beam + BigtableIO. Request message for Bigtable.ReadChangeStream. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + table_name (str): + Required. The unique name of the table from which to read a + change stream. Values are of the form + ``projects//instances//tables/
+            ``.
+            Change streaming must be enabled on the table.
+        app_profile_id (str):
+            This value specifies routing for replication.
+            If not specified, the "default" application
+            profile will be used. Single cluster routing
+            must be configured on the profile.
+        partition (google.cloud.bigtable_v2.types.StreamPartition):
+            The partition to read changes from.
+        start_time (google.protobuf.timestamp_pb2.Timestamp):
+            Start reading the stream at the specified
+            timestamp. This timestamp must be within the
+            change stream retention period, less than or
+            equal to the current time, and after change
+            stream creation, whichever is greater. This
+            value is inclusive and will be truncated to
+            microsecond granularity.
+
+            This field is a member of `oneof`_ ``start_from``.
+        continuation_tokens (google.cloud.bigtable_v2.types.StreamContinuationTokens):
+            Tokens that describe how to resume reading a stream where
+            reading previously left off. If specified, changes will be
+            read starting at that position. Tokens are delivered on the
+            stream as part of ``Heartbeat`` and ``CloseStream``
+            messages.
+
+            If a single token is provided, the token’s partition must
+            exactly match the request’s partition. If multiple tokens
+            are provided, as in the case of a partition merge, the union
+            of the token partitions must exactly cover the request’s
+            partition. Otherwise, INVALID_ARGUMENT will be returned.
+
+            This field is a member of `oneof`_ ``start_from``.
+        end_time (google.protobuf.timestamp_pb2.Timestamp):
+            If specified, OK will be returned when the
+            stream advances beyond this time. Otherwise,
+            changes will be continuously delivered on the
+            stream. This value is inclusive and will be
+            truncated to microsecond granularity.
+        heartbeat_duration (google.protobuf.duration_pb2.Duration):
+            If specified, the duration between ``Heartbeat`` messages on
+            the stream. Otherwise, defaults to 5 seconds.
+    """
+
+    table_name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    app_profile_id: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    partition: data.StreamPartition = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message=data.StreamPartition,
+    )
+    start_time: timestamp_pb2.Timestamp = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        oneof='start_from',
+        message=timestamp_pb2.Timestamp,
+    )
+    continuation_tokens: data.StreamContinuationTokens = proto.Field(
+        proto.MESSAGE,
+        number=6,
+        oneof='start_from',
+        message=data.StreamContinuationTokens,
+    )
+    end_time: timestamp_pb2.Timestamp = proto.Field(
+        proto.MESSAGE,
+        number=5,
+        message=timestamp_pb2.Timestamp,
+    )
+    heartbeat_duration: duration_pb2.Duration = proto.Field(
+        proto.MESSAGE,
+        number=7,
+        message=duration_pb2.Duration,
+    )
+
+
+class ReadChangeStreamResponse(proto.Message):
+    r"""NOTE: This API is intended to be used by Apache Beam
+    BigtableIO. Response message for Bigtable.ReadChangeStream.
+
+    This message has `oneof`_ fields (mutually exclusive fields).
+    For each oneof, at most one member field can be set at the same time.
+    Setting any member of the oneof automatically clears all other
+    members.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        data_change (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.DataChange):
+            A mutation to the partition.
+
+            This field is a member of `oneof`_ ``stream_record``.
+        heartbeat (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.Heartbeat):
+            A periodic heartbeat message.
+ + This field is a member of `oneof`_ ``stream_record``. + close_stream (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.CloseStream): + An indication that the stream should be + closed. + + This field is a member of `oneof`_ ``stream_record``. + """ + + class MutationChunk(proto.Message): + r"""A partial or complete mutation. + + Attributes: + chunk_info (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.MutationChunk.ChunkInfo): + If set, then the mutation is a ``SetCell`` with a chunked + value across multiple messages. + mutation (google.cloud.bigtable_v2.types.Mutation): + If this is a continuation of a chunked message + (``chunked_value_offset`` > 0), ignore all fields except the + ``SetCell``'s value and merge it with the previous message + by concatenating the value fields. + """ + + class ChunkInfo(proto.Message): + r"""Information about the chunking of this mutation. Only ``SetCell`` + mutations can be chunked, and all chunks for a ``SetCell`` will be + delivered contiguously with no other mutation types interleaved. + + Attributes: + chunked_value_size (int): + The total value size of all the chunks that make up the + ``SetCell``. + chunked_value_offset (int): + The byte offset of this chunk into the total + value size of the mutation. + last_chunk (bool): + When true, this is the last chunk of a chunked ``SetCell``. + """ + + chunked_value_size: int = proto.Field( + proto.INT32, + number=1, + ) + chunked_value_offset: int = proto.Field( + proto.INT32, + number=2, + ) + last_chunk: bool = proto.Field( + proto.BOOL, + number=3, + ) + + chunk_info: 'ReadChangeStreamResponse.MutationChunk.ChunkInfo' = proto.Field( + proto.MESSAGE, + number=1, + message='ReadChangeStreamResponse.MutationChunk.ChunkInfo', + ) + mutation: data.Mutation = proto.Field( + proto.MESSAGE, + number=2, + message=data.Mutation, + ) + + class DataChange(proto.Message): + r"""A message corresponding to one or more mutations to the partition + being streamed. A single logical ``DataChange`` message may also be + split across a sequence of multiple individual messages. Messages + other than the first in a sequence will only have the ``type`` and + ``chunks`` fields populated, with the final message in the sequence + also containing ``done`` set to true. + + Attributes: + type_ (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.DataChange.Type): + The type of the mutation. + source_cluster_id (str): + The cluster where the mutation was applied. Not set when + ``type`` is ``GARBAGE_COLLECTION``. + row_key (bytes): + The row key for all mutations that are part of this + ``DataChange``. If the ``DataChange`` is chunked across + multiple messages, then this field will only be set for the + first message. + commit_timestamp (google.protobuf.timestamp_pb2.Timestamp): + The timestamp at which the mutation was + applied on the Bigtable server. + tiebreaker (int): + A value that lets stream consumers reconstruct Bigtable's + conflict resolution semantics. + https://cloud.google.com/bigtable/docs/writes#conflict-resolution + In the event that the same row key, column family, column + qualifier, timestamp are modified on different clusters at + the same ``commit_timestamp``, the mutation with the larger + ``tiebreaker`` will be the one chosen for the eventually + consistent state of the system. + chunks (MutableSequence[google.cloud.bigtable_v2.types.ReadChangeStreamResponse.MutationChunk]): + The mutations associated with this change to the partition. 
+                May contain complete mutations or chunks of a multi-message
+                chunked ``DataChange`` record.
+            done (bool):
+                When true, indicates that the entire ``DataChange`` has been
+                read and the client can safely process the message.
+            token (str):
+                An encoded position for this stream's
+                partition to restart reading from. This token is
+                for the StreamPartition from the request.
+            estimated_low_watermark (google.protobuf.timestamp_pb2.Timestamp):
+                An estimate of the commit timestamp that is
+                usually lower than or equal to any timestamp for
+                a record that will be delivered in the future on
+                the stream. It is possible that, under particular
+                circumstances, a future record has a timestamp
+                that is lower than a previously seen timestamp.
+                For an example usage see
+                https://beam.apache.org/documentation/basics/#watermarks
+        """
+        class Type(proto.Enum):
+            r"""The type of mutation.
+
+            Values:
+                TYPE_UNSPECIFIED (0):
+                    The type is unspecified.
+                USER (1):
+                    A user-initiated mutation.
+                GARBAGE_COLLECTION (2):
+                    A system-initiated mutation as part of
+                    garbage collection.
+                    https://cloud.google.com/bigtable/docs/garbage-collection
+                CONTINUATION (3):
+                    This is a continuation of a multi-message
+                    change.
+            """
+            TYPE_UNSPECIFIED = 0
+            USER = 1
+            GARBAGE_COLLECTION = 2
+            CONTINUATION = 3
+
+        type_: 'ReadChangeStreamResponse.DataChange.Type' = proto.Field(
+            proto.ENUM,
+            number=1,
+            enum='ReadChangeStreamResponse.DataChange.Type',
+        )
+        source_cluster_id: str = proto.Field(
+            proto.STRING,
+            number=2,
+        )
+        row_key: bytes = proto.Field(
+            proto.BYTES,
+            number=3,
+        )
+        commit_timestamp: timestamp_pb2.Timestamp = proto.Field(
+            proto.MESSAGE,
+            number=4,
+            message=timestamp_pb2.Timestamp,
+        )
+        tiebreaker: int = proto.Field(
+            proto.INT32,
+            number=5,
+        )
+        chunks: MutableSequence['ReadChangeStreamResponse.MutationChunk'] = proto.RepeatedField(
+            proto.MESSAGE,
+            number=6,
+            message='ReadChangeStreamResponse.MutationChunk',
+        )
+        done: bool = proto.Field(
+            proto.BOOL,
+            number=8,
+        )
+        token: str = proto.Field(
+            proto.STRING,
+            number=9,
+        )
+        estimated_low_watermark: timestamp_pb2.Timestamp = proto.Field(
+            proto.MESSAGE,
+            number=10,
+            message=timestamp_pb2.Timestamp,
+        )
+
+    class Heartbeat(proto.Message):
+        r"""A periodic message with information that can be used to
+        checkpoint the state of a stream.
+
+        Attributes:
+            continuation_token (google.cloud.bigtable_v2.types.StreamContinuationToken):
+                A token that can be provided to a subsequent
+                ``ReadChangeStream`` call to pick up reading at the current
+                stream position.
+            estimated_low_watermark (google.protobuf.timestamp_pb2.Timestamp):
+                An estimate of the commit timestamp that is
+                usually lower than or equal to any timestamp for
+                a record that will be delivered in the future on
+                the stream. It is possible that, under particular
+                circumstances, a future record has a timestamp
+                that is lower than a previously seen timestamp.
+                For an example usage see
+                https://beam.apache.org/documentation/basics/#watermarks
+        """
+
+        continuation_token: data.StreamContinuationToken = proto.Field(
+            proto.MESSAGE,
+            number=1,
+            message=data.StreamContinuationToken,
+        )
+        estimated_low_watermark: timestamp_pb2.Timestamp = proto.Field(
+            proto.MESSAGE,
+            number=2,
+            message=timestamp_pb2.Timestamp,
+        )
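+
+    # Editorial sketch (not generated code): consumers of this stream
+    # typically dispatch on the ``stream_record`` oneof, e.g.:
+    #
+    #     kind = resp._pb.WhichOneof('stream_record')
+    #     if kind == 'data_change':
+    #         handle(resp.data_change)
+    #     elif kind == 'heartbeat':
+    #         checkpoint(resp.heartbeat.continuation_token)
+    #     elif kind == 'close_stream':
+    #         reopen(resp.close_stream.continuation_tokens)
+    #
+    # ``_pb.WhichOneof`` is the standard protobuf oneof check exposed by
+    # proto-plus; ``handle``/``checkpoint``/``reopen`` are hypothetical
+    # application callbacks.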
+
+    class CloseStream(proto.Message):
+        r"""A message indicating that the client should stop reading from the
+        stream. If status is OK and ``continuation_tokens`` &
+        ``new_partitions`` are empty, the stream has finished (for example
+        if there was an ``end_time`` specified). If ``continuation_tokens``
+        & ``new_partitions`` are present, then a change in partitioning
+        requires the client to open a new stream for each token to resume
+        reading. Example:
+
+        ::
+
+                                            [B,      D) ends
+                                                 |
+                                                 v
+                          new_partitions:  [A,  C) [C,  E)
+            continuation_tokens.partitions:  [B,C) [C,D)
+                                              ^---^  ^---^
+                                              ^      ^
+                                              |      |
+                                              |  StreamContinuationToken 2
+                                              |
+                                      StreamContinuationToken 1
+
+        To read the new partition [A,C), supply the continuation tokens
+        whose ranges cover the new partition, for example
+        ContinuationToken[A,B) & ContinuationToken[B,C).
+
+        Attributes:
+            status (google.rpc.status_pb2.Status):
+                The status of the stream.
+            continuation_tokens (MutableSequence[google.cloud.bigtable_v2.types.StreamContinuationToken]):
+                If non-empty, contains the information needed
+                to resume reading their associated partitions.
+            new_partitions (MutableSequence[google.cloud.bigtable_v2.types.StreamPartition]):
+                If non-empty, contains the new partitions to start reading
+                from, which are related to but not necessarily identical to
+                the partitions for the above ``continuation_tokens``.
+        """
+
+        status: status_pb2.Status = proto.Field(
+            proto.MESSAGE,
+            number=1,
+            message=status_pb2.Status,
+        )
+        continuation_tokens: MutableSequence[data.StreamContinuationToken] = proto.RepeatedField(
+            proto.MESSAGE,
+            number=2,
+            message=data.StreamContinuationToken,
+        )
+        new_partitions: MutableSequence[data.StreamPartition] = proto.RepeatedField(
+            proto.MESSAGE,
+            number=3,
+            message=data.StreamPartition,
+        )
+
+    data_change: DataChange = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        oneof='stream_record',
+        message=DataChange,
+    )
+    heartbeat: Heartbeat = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        oneof='stream_record',
+        message=Heartbeat,
+    )
+    close_stream: CloseStream = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        oneof='stream_record',
+        message=CloseStream,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/data.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/data.py
new file mode 100644
index 000000000..0211a971e
--- /dev/null
+++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/data.py
@@ -0,0 +1,1102 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.bigtable.v2',
+    manifest={
+        'Row',
+        'Family',
+        'Column',
+        'Cell',
+        'RowRange',
+        'RowSet',
+        'ColumnRange',
+        'TimestampRange',
+        'ValueRange',
+        'RowFilter',
+        'Mutation',
+        'ReadModifyWriteRule',
+        'StreamPartition',
+        'StreamContinuationTokens',
+        'StreamContinuationToken',
+    },
+)
+
+
+class Row(proto.Message):
+    r"""Specifies the complete (requested) contents of a single row
+    of a table.
Rows which exceed 256MiB in size cannot be read in + full. + + Attributes: + key (bytes): + The unique key which identifies this row + within its table. This is the same key that's + used to identify the row in, for example, a + MutateRowRequest. May contain any non-empty byte + string up to 4KiB in length. + families (MutableSequence[google.cloud.bigtable_v2.types.Family]): + May be empty, but only if the entire row is + empty. The mutual ordering of column families is + not specified. + """ + + key: bytes = proto.Field( + proto.BYTES, + number=1, + ) + families: MutableSequence['Family'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='Family', + ) + + +class Family(proto.Message): + r"""Specifies (some of) the contents of a single row/column + family intersection of a table. + + Attributes: + name (str): + The unique key which identifies this family within its row. + This is the same key that's used to identify the family in, + for example, a RowFilter which sets its + "family_name_regex_filter" field. Must match + ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors + may produce cells in a sentinel family with an empty name. + Must be no greater than 64 characters in length. + columns (MutableSequence[google.cloud.bigtable_v2.types.Column]): + Must not be empty. Sorted in order of + increasing "qualifier". + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + columns: MutableSequence['Column'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='Column', + ) + + +class Column(proto.Message): + r"""Specifies (some of) the contents of a single row/column + intersection of a table. + + Attributes: + qualifier (bytes): + The unique key which identifies this column within its + family. This is the same key that's used to identify the + column in, for example, a RowFilter which sets its + ``column_qualifier_regex_filter`` field. May contain any + byte string, including the empty string, up to 16kiB in + length. + cells (MutableSequence[google.cloud.bigtable_v2.types.Cell]): + Must not be empty. Sorted in order of decreasing + "timestamp_micros". + """ + + qualifier: bytes = proto.Field( + proto.BYTES, + number=1, + ) + cells: MutableSequence['Cell'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='Cell', + ) + + +class Cell(proto.Message): + r"""Specifies (some of) the contents of a single + row/column/timestamp of a table. + + Attributes: + timestamp_micros (int): + The cell's stored timestamp, which also uniquely identifies + it within its column. Values are always expressed in + microseconds, but individual tables may set a coarser + granularity to further restrict the allowed values. For + example, a table which specifies millisecond granularity + will only allow values of ``timestamp_micros`` which are + multiples of 1000. + value (bytes): + The value stored in the cell. + May contain any byte string, including the empty + string, up to 100MiB in length. + labels (MutableSequence[str]): + Labels applied to the cell by a + [RowFilter][google.bigtable.v2.RowFilter]. + """ + + timestamp_micros: int = proto.Field( + proto.INT64, + number=1, + ) + value: bytes = proto.Field( + proto.BYTES, + number=2, + ) + labels: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class RowRange(proto.Message): + r"""Specifies a contiguous range of rows. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. 
+    Setting any member of the oneof automatically clears all other
+    members.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        start_key_closed (bytes):
+            Used when giving an inclusive lower bound for
+            the range.
+
+            This field is a member of `oneof`_ ``start_key``.
+        start_key_open (bytes):
+            Used when giving an exclusive lower bound for
+            the range.
+
+            This field is a member of `oneof`_ ``start_key``.
+        end_key_open (bytes):
+            Used when giving an exclusive upper bound for
+            the range.
+
+            This field is a member of `oneof`_ ``end_key``.
+        end_key_closed (bytes):
+            Used when giving an inclusive upper bound for
+            the range.
+
+            This field is a member of `oneof`_ ``end_key``.
+    """
+
+    start_key_closed: bytes = proto.Field(
+        proto.BYTES,
+        number=1,
+        oneof='start_key',
+    )
+    start_key_open: bytes = proto.Field(
+        proto.BYTES,
+        number=2,
+        oneof='start_key',
+    )
+    end_key_open: bytes = proto.Field(
+        proto.BYTES,
+        number=3,
+        oneof='end_key',
+    )
+    end_key_closed: bytes = proto.Field(
+        proto.BYTES,
+        number=4,
+        oneof='end_key',
+    )
+
+
+class RowSet(proto.Message):
+    r"""Specifies a non-contiguous set of rows.
+
+    Attributes:
+        row_keys (MutableSequence[bytes]):
+            Single rows included in the set.
+        row_ranges (MutableSequence[google.cloud.bigtable_v2.types.RowRange]):
+            Contiguous row ranges included in the set.
+    """
+
+    row_keys: MutableSequence[bytes] = proto.RepeatedField(
+        proto.BYTES,
+        number=1,
+    )
+    row_ranges: MutableSequence['RowRange'] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=2,
+        message='RowRange',
+    )
+
+
+class ColumnRange(proto.Message):
+    r"""Specifies a contiguous range of columns within a single column
+    family. The range spans from <family_name>:<start_qualifier> to
+    <family_name>:<end_qualifier>, where both bounds can be either
+    inclusive or exclusive.
+
+    This message has `oneof`_ fields (mutually exclusive fields).
+    For each oneof, at most one member field can be set at the same time.
+    Setting any member of the oneof automatically clears all other
+    members.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        family_name (str):
+            The name of the column family within which
+            this range falls.
+        start_qualifier_closed (bytes):
+            Used when giving an inclusive lower bound for
+            the range.
+
+            This field is a member of `oneof`_ ``start_qualifier``.
+        start_qualifier_open (bytes):
+            Used when giving an exclusive lower bound for
+            the range.
+
+            This field is a member of `oneof`_ ``start_qualifier``.
+        end_qualifier_closed (bytes):
+            Used when giving an inclusive upper bound for
+            the range.
+
+            This field is a member of `oneof`_ ``end_qualifier``.
+        end_qualifier_open (bytes):
+            Used when giving an exclusive upper bound for
+            the range.
+
+            This field is a member of `oneof`_ ``end_qualifier``.
+    """
+
+    family_name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    start_qualifier_closed: bytes = proto.Field(
+        proto.BYTES,
+        number=2,
+        oneof='start_qualifier',
+    )
+    start_qualifier_open: bytes = proto.Field(
+        proto.BYTES,
+        number=3,
+        oneof='start_qualifier',
+    )
+    end_qualifier_closed: bytes = proto.Field(
+        proto.BYTES,
+        number=4,
+        oneof='end_qualifier',
+    )
+    end_qualifier_open: bytes = proto.Field(
+        proto.BYTES,
+        number=5,
+        oneof='end_qualifier',
+    )
+
+
+class TimestampRange(proto.Message):
+    r"""Specifies a contiguous range of microsecond timestamps.
+
+    Attributes:
+        start_timestamp_micros (int):
+            Inclusive lower bound.
If left empty, + interpreted as 0. + end_timestamp_micros (int): + Exclusive upper bound. If left empty, + interpreted as infinity. + """ + + start_timestamp_micros: int = proto.Field( + proto.INT64, + number=1, + ) + end_timestamp_micros: int = proto.Field( + proto.INT64, + number=2, + ) + + +class ValueRange(proto.Message): + r"""Specifies a contiguous range of raw byte values. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + start_value_closed (bytes): + Used when giving an inclusive lower bound for + the range. + + This field is a member of `oneof`_ ``start_value``. + start_value_open (bytes): + Used when giving an exclusive lower bound for + the range. + + This field is a member of `oneof`_ ``start_value``. + end_value_closed (bytes): + Used when giving an inclusive upper bound for + the range. + + This field is a member of `oneof`_ ``end_value``. + end_value_open (bytes): + Used when giving an exclusive upper bound for + the range. + + This field is a member of `oneof`_ ``end_value``. + """ + + start_value_closed: bytes = proto.Field( + proto.BYTES, + number=1, + oneof='start_value', + ) + start_value_open: bytes = proto.Field( + proto.BYTES, + number=2, + oneof='start_value', + ) + end_value_closed: bytes = proto.Field( + proto.BYTES, + number=3, + oneof='end_value', + ) + end_value_open: bytes = proto.Field( + proto.BYTES, + number=4, + oneof='end_value', + ) + + +class RowFilter(proto.Message): + r"""Takes a row as input and produces an alternate view of the row based + on specified rules. For example, a RowFilter might trim down a row + to include just the cells from columns matching a given regular + expression, or might return all the cells of a row but not their + values. More complicated filters can be composed out of these + components to express requests such as, "within every column of a + particular family, give just the two most recent cells which are + older than timestamp X." + + There are two broad categories of RowFilters (true filters and + transformers), as well as two ways to compose simple filters into + more complex ones (chains and interleaves). They work as follows: + + - True filters alter the input row by excluding some of its cells + wholesale from the output row. An example of a true filter is the + ``value_regex_filter``, which excludes cells whose values don't + match the specified pattern. All regex true filters use RE2 + syntax (https://github.com/google/re2/wiki/Syntax) in raw byte + mode (RE2::Latin1), and are evaluated as full matches. An + important point to keep in mind is that ``RE2(.)`` is equivalent + by default to ``RE2([^\n])``, meaning that it does not match + newlines. When attempting to match an arbitrary byte, you should + therefore use the escape sequence ``\C``, which may need to be + further escaped as ``\\C`` in your client language. + + - Transformers alter the input row by changing the values of some + of its cells in the output, without excluding them completely. + Currently, the only supported transformer is the + ``strip_value_transformer``, which replaces every cell's value + with the empty string. + + - Chains and interleaves are described in more detail in the + RowFilter.Chain and RowFilter.Interleave documentation. 
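+
+    For example, a filter that keeps only the most recent cell in each
+    column of family ``cf`` can be composed as a chain (an illustrative
+    sketch; the family name ``cf`` is a placeholder, not part of the
+    generated documentation):
+
+    ::
+
+        RowFilter(
+            chain=RowFilter.Chain(filters=[
+                RowFilter(family_name_regex_filter='cf'),
+                RowFilter(cells_per_column_limit_filter=1),
+            ])
+        )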
+ + The total serialized size of a RowFilter message must not exceed + 20480 bytes, and RowFilters may not be nested within each other (in + Chains or Interleaves) to a depth of more than 20. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + chain (google.cloud.bigtable_v2.types.RowFilter.Chain): + Applies several RowFilters to the data in + sequence, progressively narrowing the results. + + This field is a member of `oneof`_ ``filter``. + interleave (google.cloud.bigtable_v2.types.RowFilter.Interleave): + Applies several RowFilters to the data in + parallel and combines the results. + + This field is a member of `oneof`_ ``filter``. + condition (google.cloud.bigtable_v2.types.RowFilter.Condition): + Applies one of two possible RowFilters to the + data based on the output of a predicate + RowFilter. + + This field is a member of `oneof`_ ``filter``. + sink (bool): + ADVANCED USE ONLY. Hook for introspection into the + RowFilter. Outputs all cells directly to the output of the + read rather than to any parent filter. Consider the + following example: + + :: + + Chain( + FamilyRegex("A"), + Interleave( + All(), + Chain(Label("foo"), Sink()) + ), + QualifierRegex("B") + ) + + A,A,1,w + A,B,2,x + B,B,4,z + | + FamilyRegex("A") + | + A,A,1,w + A,B,2,x + | + +------------+-------------+ + | | + All() Label(foo) + | | + A,A,1,w A,A,1,w,labels:[foo] + A,B,2,x A,B,2,x,labels:[foo] + | | + | Sink() --------------+ + | | | + +------------+ x------+ A,A,1,w,labels:[foo] + | A,B,2,x,labels:[foo] + A,A,1,w | + A,B,2,x | + | | + QualifierRegex("B") | + | | + A,B,2,x | + | | + +--------------------------------+ + | + A,A,1,w,labels:[foo] + A,B,2,x,labels:[foo] // could be switched + A,B,2,x // could be switched + + Despite being excluded by the qualifier filter, a copy of + every cell that reaches the sink is present in the final + result. + + As with an + [Interleave][google.bigtable.v2.RowFilter.Interleave], + duplicate cells are possible, and appear in an unspecified + mutual order. In this case we have a duplicate with column + "A:B" and timestamp 2, because one copy passed through the + all filter while the other was passed through the label and + sink. Note that one copy has label "foo", while the other + does not. + + Cannot be used within the ``predicate_filter``, + ``true_filter``, or ``false_filter`` of a + [Condition][google.bigtable.v2.RowFilter.Condition]. + + This field is a member of `oneof`_ ``filter``. + pass_all_filter (bool): + Matches all cells, regardless of input. Functionally + equivalent to leaving ``filter`` unset, but included for + completeness. + + This field is a member of `oneof`_ ``filter``. + block_all_filter (bool): + Does not match any cells, regardless of + input. Useful for temporarily disabling just + part of a filter. + + This field is a member of `oneof`_ ``filter``. + row_key_regex_filter (bytes): + Matches only cells from rows whose keys satisfy the given + RE2 regex. In other words, passes through the entire row + when the key matches, and otherwise produces an empty row. + Note that, since row keys can contain arbitrary bytes, the + ``\C`` escape sequence must be used if a true wildcard is + desired. 
The ``.`` character will not match the new line + character ``\n``, which may be present in a binary key. + + This field is a member of `oneof`_ ``filter``. + row_sample_filter (float): + Matches all cells from a row with probability + p, and matches no cells from the row with + probability 1-p. + + This field is a member of `oneof`_ ``filter``. + family_name_regex_filter (str): + Matches only cells from columns whose families satisfy the + given RE2 regex. For technical reasons, the regex must not + contain the ``:`` character, even if it is not being used as + a literal. Note that, since column families cannot contain + the new line character ``\n``, it is sufficient to use ``.`` + as a full wildcard when matching column family names. + + This field is a member of `oneof`_ ``filter``. + column_qualifier_regex_filter (bytes): + Matches only cells from columns whose qualifiers satisfy the + given RE2 regex. Note that, since column qualifiers can + contain arbitrary bytes, the ``\C`` escape sequence must be + used if a true wildcard is desired. The ``.`` character will + not match the new line character ``\n``, which may be + present in a binary qualifier. + + This field is a member of `oneof`_ ``filter``. + column_range_filter (google.cloud.bigtable_v2.types.ColumnRange): + Matches only cells from columns within the + given range. + + This field is a member of `oneof`_ ``filter``. + timestamp_range_filter (google.cloud.bigtable_v2.types.TimestampRange): + Matches only cells with timestamps within the + given range. + + This field is a member of `oneof`_ ``filter``. + value_regex_filter (bytes): + Matches only cells with values that satisfy the given + regular expression. Note that, since cell values can contain + arbitrary bytes, the ``\C`` escape sequence must be used if + a true wildcard is desired. The ``.`` character will not + match the new line character ``\n``, which may be present in + a binary value. + + This field is a member of `oneof`_ ``filter``. + value_range_filter (google.cloud.bigtable_v2.types.ValueRange): + Matches only cells with values that fall + within the given range. + + This field is a member of `oneof`_ ``filter``. + cells_per_row_offset_filter (int): + Skips the first N cells of each row, matching + all subsequent cells. If duplicate cells are + present, as is possible when using an + Interleave, each copy of the cell is counted + separately. + + This field is a member of `oneof`_ ``filter``. + cells_per_row_limit_filter (int): + Matches only the first N cells of each row. + If duplicate cells are present, as is possible + when using an Interleave, each copy of the cell + is counted separately. + + This field is a member of `oneof`_ ``filter``. + cells_per_column_limit_filter (int): + Matches only the most recent N cells within each column. For + example, if N=2, this filter would match column ``foo:bar`` + at timestamps 10 and 9, skip all earlier cells in + ``foo:bar``, and then begin matching again in column + ``foo:bar2``. If duplicate cells are present, as is possible + when using an Interleave, each copy of the cell is counted + separately. + + This field is a member of `oneof`_ ``filter``. + strip_value_transformer (bool): + Replaces each cell's value with the empty + string. + + This field is a member of `oneof`_ ``filter``. + apply_label_transformer (str): + Applies the given label to all cells in the output row. This + allows the client to determine which results were produced + from which part of the filter. 
+
+            Values must be at most 15 characters in length, and match
+            the RE2 pattern ``[a-z0-9\\-]+``.
+
+            Due to a technical limitation, it is not currently possible
+            to apply multiple labels to a cell. As a result, a Chain may
+            have no more than one sub-filter which contains an
+            ``apply_label_transformer``. It is okay for an Interleave to
+            contain multiple ``apply_label_transformers``, as they will
+            be applied to separate copies of the input. This may be
+            relaxed in the future.
+
+            This field is a member of `oneof`_ ``filter``.
+    """
+
+    class Chain(proto.Message):
+        r"""A RowFilter which sends rows through several RowFilters in
+        sequence.
+
+        Attributes:
+            filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]):
+                The elements of "filters" are chained
+                together to process the input row: in row ->
+                f(0) -> intermediate row -> f(1) -> ... -> f(N)
+                -> out row. The full chain is executed
+                atomically.
+        """
+
+        filters: MutableSequence['RowFilter'] = proto.RepeatedField(
+            proto.MESSAGE,
+            number=1,
+            message='RowFilter',
+        )
+
+    class Interleave(proto.Message):
+        r"""A RowFilter which sends each row to each of several component
+        RowFilters and interleaves the results.
+
+        Attributes:
+            filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]):
+                The elements of "filters" all process a copy of the input
+                row, and the results are pooled, sorted, and combined into a
+                single output row. If multiple cells are produced with the
+                same column and timestamp, they will all appear in the
+                output row in an unspecified mutual order. Consider the
+                following example, with three filters:
+
+                ::
+
+                                      input row
+                                          |
+                    -------------------------------------------------
+                       |                  |                  |
+                      f(0)               f(1)               f(2)
+                       |                  |                  |
+                    1: foo,bar,10,x    foo,bar,10,z       far,bar,7,a
+                    2: foo,blah,11,z   far,blah,5,x       far,blah,5,x
+                       |                  |                  |
+                    -------------------------------------------------
+                                          |
+                    1: foo,bar,10,z   // could have switched with #2
+                    2: foo,bar,10,x   // could have switched with #1
+                    3: foo,blah,11,z
+                    4: far,bar,7,a
+                    5: far,blah,5,x   // identical to #6
+                    6: far,blah,5,x   // identical to #5
+
+                All interleaved filters are executed atomically.
+        """
+
+        filters: MutableSequence['RowFilter'] = proto.RepeatedField(
+            proto.MESSAGE,
+            number=1,
+            message='RowFilter',
+        )
+
+    class Condition(proto.Message):
+        r"""A RowFilter which evaluates one of two possible RowFilters,
+        depending on whether or not a predicate RowFilter outputs any
+        cells from the input row.
+
+        IMPORTANT NOTE: The predicate filter does not execute atomically
+        with the true and false filters, which may lead to inconsistent
+        or unexpected results. Additionally, Condition filters have poor
+        performance, especially when filters are set for the false
+        condition.
+
+        Attributes:
+            predicate_filter (google.cloud.bigtable_v2.types.RowFilter):
+                If ``predicate_filter`` outputs any cells, then
+                ``true_filter`` will be evaluated on the input row.
+                Otherwise, ``false_filter`` will be evaluated.
+            true_filter (google.cloud.bigtable_v2.types.RowFilter):
+                The filter to apply to the input row if ``predicate_filter``
+                returns any results. If not provided, no results will be
+                returned in the true case.
+            false_filter (google.cloud.bigtable_v2.types.RowFilter):
+                The filter to apply to the input row if ``predicate_filter``
+                does not return any results. If not provided, no results
+                will be returned in the false case.
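+
+        A minimal sketch (the family name below is hypothetical): pass
+        rows containing an ``alert`` cell through unchanged, and strip
+        the cell values of every other row::
+
+            # 'alert' is a placeholder family name for this sketch.
+            condition = RowFilter.Condition(
+                predicate_filter=RowFilter(family_name_regex_filter='alert'),
+                true_filter=RowFilter(pass_all_filter=True),
+                false_filter=RowFilter(strip_value_transformer=True),
+            )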
+ """ + + predicate_filter: 'RowFilter' = proto.Field( + proto.MESSAGE, + number=1, + message='RowFilter', + ) + true_filter: 'RowFilter' = proto.Field( + proto.MESSAGE, + number=2, + message='RowFilter', + ) + false_filter: 'RowFilter' = proto.Field( + proto.MESSAGE, + number=3, + message='RowFilter', + ) + + chain: Chain = proto.Field( + proto.MESSAGE, + number=1, + oneof='filter', + message=Chain, + ) + interleave: Interleave = proto.Field( + proto.MESSAGE, + number=2, + oneof='filter', + message=Interleave, + ) + condition: Condition = proto.Field( + proto.MESSAGE, + number=3, + oneof='filter', + message=Condition, + ) + sink: bool = proto.Field( + proto.BOOL, + number=16, + oneof='filter', + ) + pass_all_filter: bool = proto.Field( + proto.BOOL, + number=17, + oneof='filter', + ) + block_all_filter: bool = proto.Field( + proto.BOOL, + number=18, + oneof='filter', + ) + row_key_regex_filter: bytes = proto.Field( + proto.BYTES, + number=4, + oneof='filter', + ) + row_sample_filter: float = proto.Field( + proto.DOUBLE, + number=14, + oneof='filter', + ) + family_name_regex_filter: str = proto.Field( + proto.STRING, + number=5, + oneof='filter', + ) + column_qualifier_regex_filter: bytes = proto.Field( + proto.BYTES, + number=6, + oneof='filter', + ) + column_range_filter: 'ColumnRange' = proto.Field( + proto.MESSAGE, + number=7, + oneof='filter', + message='ColumnRange', + ) + timestamp_range_filter: 'TimestampRange' = proto.Field( + proto.MESSAGE, + number=8, + oneof='filter', + message='TimestampRange', + ) + value_regex_filter: bytes = proto.Field( + proto.BYTES, + number=9, + oneof='filter', + ) + value_range_filter: 'ValueRange' = proto.Field( + proto.MESSAGE, + number=15, + oneof='filter', + message='ValueRange', + ) + cells_per_row_offset_filter: int = proto.Field( + proto.INT32, + number=10, + oneof='filter', + ) + cells_per_row_limit_filter: int = proto.Field( + proto.INT32, + number=11, + oneof='filter', + ) + cells_per_column_limit_filter: int = proto.Field( + proto.INT32, + number=12, + oneof='filter', + ) + strip_value_transformer: bool = proto.Field( + proto.BOOL, + number=13, + oneof='filter', + ) + apply_label_transformer: str = proto.Field( + proto.STRING, + number=19, + oneof='filter', + ) + + +class Mutation(proto.Message): + r"""Specifies a particular change to be made to the contents of a + row. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + set_cell (google.cloud.bigtable_v2.types.Mutation.SetCell): + Set a cell's value. + + This field is a member of `oneof`_ ``mutation``. + delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): + Deletes cells from a column. + + This field is a member of `oneof`_ ``mutation``. + delete_from_family (google.cloud.bigtable_v2.types.Mutation.DeleteFromFamily): + Deletes cells from a column family. + + This field is a member of `oneof`_ ``mutation``. + delete_from_row (google.cloud.bigtable_v2.types.Mutation.DeleteFromRow): + Deletes cells from the entire row. + + This field is a member of `oneof`_ ``mutation``. + """ + + class SetCell(proto.Message): + r"""A Mutation which sets the value of the specified cell. + + Attributes: + family_name (str): + The name of the family into which new data should be + written. 
Must match ``[-_.a-zA-Z0-9]+``.
+            column_qualifier (bytes):
+                The qualifier of the column into which new
+                data should be written. Can be any byte string,
+                including the empty string.
+            timestamp_micros (int):
+                The timestamp of the cell into which new data
+                should be written. Use -1 for current Bigtable
+                server time. Otherwise, the client should set
+                this value itself, noting that the default value
+                is a timestamp of zero if the field is left
+                unspecified. Values must match the granularity
+                of the table (e.g. micros, millis).
+            value (bytes):
+                The value to be written into the specified
+                cell.
+        """
+
+        family_name: str = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        column_qualifier: bytes = proto.Field(
+            proto.BYTES,
+            number=2,
+        )
+        timestamp_micros: int = proto.Field(
+            proto.INT64,
+            number=3,
+        )
+        value: bytes = proto.Field(
+            proto.BYTES,
+            number=4,
+        )
+
+    class DeleteFromColumn(proto.Message):
+        r"""A Mutation which deletes cells from the specified column,
+        optionally restricting the deletions to a given timestamp range.
+
+        Attributes:
+            family_name (str):
+                The name of the family from which cells should be deleted.
+                Must match ``[-_.a-zA-Z0-9]+``.
+            column_qualifier (bytes):
+                The qualifier of the column from which cells
+                should be deleted. Can be any byte string,
+                including the empty string.
+            time_range (google.cloud.bigtable_v2.types.TimestampRange):
+                The range of timestamps within which cells
+                should be deleted.
+        """
+
+        family_name: str = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+        column_qualifier: bytes = proto.Field(
+            proto.BYTES,
+            number=2,
+        )
+        time_range: 'TimestampRange' = proto.Field(
+            proto.MESSAGE,
+            number=3,
+            message='TimestampRange',
+        )
+
+    class DeleteFromFamily(proto.Message):
+        r"""A Mutation which deletes all cells from the specified column
+        family.
+
+        Attributes:
+            family_name (str):
+                The name of the family from which cells should be deleted.
+                Must match ``[-_.a-zA-Z0-9]+``.
+        """
+
+        family_name: str = proto.Field(
+            proto.STRING,
+            number=1,
+        )
+
+    class DeleteFromRow(proto.Message):
+        r"""A Mutation which deletes all cells from the containing row.
+        """
+
+    set_cell: SetCell = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        oneof='mutation',
+        message=SetCell,
+    )
+    delete_from_column: DeleteFromColumn = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        oneof='mutation',
+        message=DeleteFromColumn,
+    )
+    delete_from_family: DeleteFromFamily = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        oneof='mutation',
+        message=DeleteFromFamily,
+    )
+    delete_from_row: DeleteFromRow = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        oneof='mutation',
+        message=DeleteFromRow,
+    )
+
+
+class ReadModifyWriteRule(proto.Message):
+    r"""Specifies an atomic read/modify/write operation on the latest
+    value of the specified column.
+
+    This message has `oneof`_ fields (mutually exclusive fields).
+    For each oneof, at most one member field can be set at the same time.
+    Setting any member of the oneof automatically clears all other
+    members.
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        family_name (str):
+            The name of the family to which the read/modify/write should
+            be applied. Must match ``[-_.a-zA-Z0-9]+``.
+        column_qualifier (bytes):
+            The qualifier of the column to which the
+            read/modify/write should be applied.
+            Can be any byte string, including the empty
+            string.
+        append_value (bytes):
+            Rule specifying that ``append_value`` be appended to the
+            existing value. If the targeted cell is unset, it will be
+            treated as containing the empty string.
+
+            This field is a member of `oneof`_ ``rule``.
+        increment_amount (int):
+            Rule specifying that ``increment_amount`` be added to the
+            existing value. If the targeted cell is unset, it will be
+            treated as containing a zero. Otherwise, the targeted cell
+            must contain an 8-byte value (interpreted as a 64-bit
+            big-endian signed integer), or the entire request will fail.
+
+            This field is a member of `oneof`_ ``rule``.
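+
+    As a hand-written sketch (the family, qualifier, and amount below
+    are hypothetical), a rule incrementing a counter cell, whose existing
+    value must be 8 bytes big-endian, e.g. ``struct.pack('>q', 0)``,
+    could be built as::
+
+        # 'counters' and b'clicks' are placeholder names for this sketch.
+        rule = ReadModifyWriteRule(
+            family_name='counters',
+            column_qualifier=b'clicks',
+            increment_amount=1,
+        )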
+    """
+
+    family_name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    column_qualifier: bytes = proto.Field(
+        proto.BYTES,
+        number=2,
+    )
+    append_value: bytes = proto.Field(
+        proto.BYTES,
+        number=3,
+        oneof='rule',
+    )
+    increment_amount: int = proto.Field(
+        proto.INT64,
+        number=4,
+        oneof='rule',
+    )
+
+
+class StreamPartition(proto.Message):
+    r"""NOTE: This API is intended to be used by Apache Beam
+    BigtableIO. A partition of a change stream.
+
+    Attributes:
+        row_range (google.cloud.bigtable_v2.types.RowRange):
+            The row range covered by this partition, specified by
+            [``start_key_closed``, ``end_key_open``).
+    """
+
+    row_range: 'RowRange' = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='RowRange',
+    )
+
+
+class StreamContinuationTokens(proto.Message):
+    r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. The
+    information required to continue reading the data from multiple
+    ``StreamPartitions`` from where a previous read left off.
+
+    Attributes:
+        tokens (MutableSequence[google.cloud.bigtable_v2.types.StreamContinuationToken]):
+            List of continuation tokens.
+    """
+
+    tokens: MutableSequence['StreamContinuationToken'] = proto.RepeatedField(
+        proto.MESSAGE,
+        number=1,
+        message='StreamContinuationToken',
+    )
+
+
+class StreamContinuationToken(proto.Message):
+    r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. The
+    information required to continue reading the data from a
+    ``StreamPartition`` from where a previous read left off.
+
+    Attributes:
+        partition (google.cloud.bigtable_v2.types.StreamPartition):
+            The partition that this token applies to.
+        token (str):
+            An encoded position in the stream to restart
+            reading from.
+    """
+
+    partition: 'StreamPartition' = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='StreamPartition',
+    )
+    token: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/feature_flags.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/feature_flags.py
new file mode 100644
index 000000000..2ad597536
--- /dev/null
+++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/feature_flags.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.bigtable.v2',
+    manifest={
+        'FeatureFlags',
+    },
+)
+
+
+class FeatureFlags(proto.Message):
+    r"""Feature flags supported or enabled by a client. This is intended to
+    be sent as part of request metadata to assure the server that
+    certain behaviors are safe to enable. This proto is meant to be
+    serialized and websafe-base64 encoded under the
+    ``bigtable-features`` metadata key. The value will remain constant
+    for the lifetime of a client and due to HTTP2's HPACK compression,
+    the request overhead will be tiny. This is an internal
+    implementation detail and should not be used by end users directly.
+
+    Attributes:
+        reverse_scans (bool):
+            Notify the server that the client supports
+            reverse scans. The server will reject
+            ReadRowsRequests with the reverse bit set when
+            this is absent.
+        mutate_rows_rate_limit (bool):
+            Notify the server that the client enables
+            batch write flow control by requesting
+            RateLimitInfo from MutateRowsResponse. Due to
+            technical reasons, this disables partial
+            retries.
+        mutate_rows_rate_limit2 (bool):
+            Notify the server that the client enables
+            batch write flow control by requesting
+            RateLimitInfo from MutateRowsResponse, with
+            partial retries enabled.
+        last_scanned_row_responses (bool):
+            Notify the server that the client supports the
+            last_scanned_row field in ReadRowsResponse for long-running
+            scans.
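+
+    As a hand-written sketch of that encoding (this helper code is not
+    part of the generated API surface), a caller might produce the
+    metadata value with proto-plus's ``serialize`` helper::
+
+        import base64
+
+        # Flag choices here are arbitrary, for illustration only.
+        flags = FeatureFlags(reverse_scans=True)
+        value = base64.urlsafe_b64encode(FeatureFlags.serialize(flags))
+        metadata = [('bigtable-features', value.decode('ascii'))]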
+    """
+
+    reverse_scans: bool = proto.Field(
+        proto.BOOL,
+        number=1,
+    )
+    mutate_rows_rate_limit: bool = proto.Field(
+        proto.BOOL,
+        number=3,
+    )
+    mutate_rows_rate_limit2: bool = proto.Field(
+        proto.BOOL,
+        number=5,
+    )
+    last_scanned_row_responses: bool = proto.Field(
+        proto.BOOL,
+        number=4,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/request_stats.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/request_stats.py
new file mode 100644
index 000000000..59ab626f5
--- /dev/null
+++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/request_stats.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+from google.protobuf import duration_pb2  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.bigtable.v2',
+    manifest={
+        'ReadIterationStats',
+        'RequestLatencyStats',
+        'FullReadStatsView',
+        'RequestStats',
+    },
+)
+
+
+class ReadIterationStats(proto.Message):
+    r"""ReadIterationStats captures information about the iteration
+    of rows or cells over the course of a read, e.g. how many
+    results were scanned in a read operation versus the results
+    returned.
+
+    Attributes:
+        rows_seen_count (int):
+            The rows seen (scanned) as part of the
+            request. This includes the count of rows
+            returned, as captured below.
+        rows_returned_count (int):
+            The rows returned as part of the request.
+        cells_seen_count (int):
+            The cells seen (scanned) as part of the
+            request. This includes the count of cells
+            returned, as captured below.
+        cells_returned_count (int):
+            The cells returned as part of the request.
+    """
+
+    rows_seen_count: int = proto.Field(
+        proto.INT64,
+        number=1,
+    )
+    rows_returned_count: int = proto.Field(
+        proto.INT64,
+        number=2,
+    )
+    cells_seen_count: int = proto.Field(
+        proto.INT64,
+        number=3,
+    )
+    cells_returned_count: int = proto.Field(
+        proto.INT64,
+        number=4,
+    )
+
+
+class RequestLatencyStats(proto.Message):
+    r"""RequestLatencyStats provides a measurement of the latency of
+    the request as it interacts with different systems over its
+    lifetime, e.g. how long the request took to execute within a
+    frontend server.
+
+    Attributes:
+        frontend_server_latency (google.protobuf.duration_pb2.Duration):
+            The latency measured by the frontend server
+            handling this request, from when the request was
+            received, to when this value is sent back in the
+            response. For more context on the component that
+            is measuring this latency, see:
+            https://cloud.google.com/bigtable/docs/overview
+
+            Note: This value may be slightly shorter than
+            the value reported into aggregate latency
+            metrics in Monitoring for this request
+            (https://cloud.google.com/bigtable/docs/monitoring-instance)
+            as this value needs to be sent in the response
+            before the latency measurement including that
+            transmission is finalized.
+
+            Note: This value includes the end-to-end latency
+            of contacting nodes in the targeted cluster,
+            e.g. measuring from when the first byte arrives
+            at the frontend server, to when this value is
+            sent back as the last value in the response,
+            including any latency incurred by contacting
+            nodes, waiting for results from nodes, and
+            finally sending results from nodes back to the
+            caller.
+    """
+
+    frontend_server_latency: duration_pb2.Duration = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message=duration_pb2.Duration,
+    )
+
+
+class FullReadStatsView(proto.Message):
+    r"""FullReadStatsView captures all known information about a
+    read.
+
+    Attributes:
+        read_iteration_stats (google.cloud.bigtable_v2.types.ReadIterationStats):
+            Iteration stats describe how efficient the
+            read is, e.g. comparing rows seen vs. rows
+            returned or cells seen vs. cells returned can
+            provide an indication of read efficiency (the
+            higher the ratio of seen to returned the better).
+        request_latency_stats (google.cloud.bigtable_v2.types.RequestLatencyStats):
+            Request latency stats describe the time taken
+            to complete a request, from the server side.
+    """
+
+    read_iteration_stats: 'ReadIterationStats' = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        message='ReadIterationStats',
+    )
+    request_latency_stats: 'RequestLatencyStats' = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message='RequestLatencyStats',
+    )
+
+
+class RequestStats(proto.Message):
+    r"""RequestStats is the container for additional information pertaining
+    to a single request, helpful for evaluating the performance of the
+    sent request. Currently, there are the following supported methods:
+
+    - google.bigtable.v2.ReadRows
+
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        full_read_stats_view (google.cloud.bigtable_v2.types.FullReadStatsView):
+            Available with the
+            ReadRowsRequest.RequestStatsView.REQUEST_STATS_FULL view,
+            see package google.bigtable.v2.
+
+            This field is a member of `oneof`_ ``stats_view``.
+    """
+
+    full_read_stats_view: 'FullReadStatsView' = proto.Field(
+        proto.MESSAGE,
+        number=1,
+        oneof='stats_view',
+        message='FullReadStatsView',
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/response_params.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/response_params.py
new file mode 100644
index 000000000..7f1e4d8fa
--- /dev/null
+++ b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/response_params.py
@@ -0,0 +1,64 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from __future__ import annotations
+
+from typing import MutableMapping, MutableSequence
+
+import proto  # type: ignore
+
+
+__protobuf__ = proto.module(
+    package='google.bigtable.v2',
+    manifest={
+        'ResponseParams',
+    },
+)
+
+
+class ResponseParams(proto.Message):
+    r"""Response metadata proto. This is an experimental feature that will
+    be used to get zone_id and cluster_id from response trailers to tag
+    the metrics. This should not be used by customers directly.
+
+
+    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+    Attributes:
+        zone_id (str):
+            The Cloud Bigtable zone associated with the
+            cluster.
+
+            This field is a member of `oneof`_ ``_zone_id``.
+        cluster_id (str):
+            Identifier for a cluster that represents a set
+            of Bigtable resources.
+
+            This field is a member of `oneof`_ ``_cluster_id``.
+    """
+
+    zone_id: str = proto.Field(
+        proto.STRING,
+        number=1,
+        optional=True,
+    )
+    cluster_id: str = proto.Field(
+        proto.STRING,
+        number=2,
+        optional=True,
+    )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/owl-bot-staging/bigtable/v2/mypy.ini b/owl-bot-staging/bigtable/v2/mypy.ini
new file mode 100644
index 000000000..574c5aed3
--- /dev/null
+++ b/owl-bot-staging/bigtable/v2/mypy.ini
@@ -0,0 +1,3 @@
+[mypy]
+python_version = 3.7
+namespace_packages = True
diff --git a/owl-bot-staging/bigtable/v2/noxfile.py b/owl-bot-staging/bigtable/v2/noxfile.py
new file mode 100644
index 000000000..40920e96c
--- /dev/null
+++ b/owl-bot-staging/bigtable/v2/noxfile.py
@@ -0,0 +1,184 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+import nox  # type: ignore
+
+ALL_PYTHON = [
+    "3.7",
+    "3.8",
+    "3.9",
+    "3.10",
+    "3.11",
+]
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
+
+BLACK_VERSION = "black==22.3.0"
+BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"]
+DEFAULT_PYTHON_VERSION = "3.11"
+
+nox.sessions = [
+    "unit",
+    "cover",
+    "mypy",
+    "check_lower_bounds",
+    # exclude update_lower_bounds from default
+    "docs",
+    "blacken",
+    "lint",
+    "lint_setup_py",
+]
+
+@nox.session(python=ALL_PYTHON)
+def unit(session):
+    """Run the unit test suite."""
+
+    session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"')
+    session.install('-e', '.')
+
+    session.run(
+        'py.test',
+        '--quiet',
+        '--cov=google/cloud/bigtable_v2/',
+        '--cov=tests/',
+        '--cov-config=.coveragerc',
+        '--cov-report=term',
+        '--cov-report=html',
+        os.path.join('tests', 'unit', ''.join(session.posargs))
+    )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def cover(session):
+    """Run the final coverage report.
+    This outputs the coverage report aggregating coverage from the unit
+    test runs (not system test runs), and then erases coverage data.
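+
+    A typical local invocation (assuming ``nox`` is installed) runs a
+    unit session first so this session has coverage data to aggregate::
+
+        nox -s unit-3.11 cover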
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=ALL_PYTHON) +def mypy(session): + """Run the type checker.""" + session.install( + 'mypy', + 'types-requests', + 'types-protobuf' + ) + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx==7.0.1", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. + """ + session.install("flake8", BLACK_VERSION) + session.run( + "black", + "--check", + *BLACK_PATHS, + ) + session.run("flake8", "google", "tests", "samples") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *BLACK_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") diff --git a/owl-bot-staging/bigtable/v2/scripts/fixup_bigtable_v2_keywords.py b/owl-bot-staging/bigtable/v2/scripts/fixup_bigtable_v2_keywords.py new file mode 100644 index 000000000..8d32e5b70 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/scripts/fixup_bigtable_v2_keywords.py @@ -0,0 +1,184 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import argparse
+import os
+import libcst as cst
+import pathlib
+import sys
+from typing import (Any, Callable, Dict, List, Sequence, Tuple)
+
+
+def partition(
+    predicate: Callable[[Any], bool],
+    iterator: Sequence[Any]
+) -> Tuple[List[Any], List[Any]]:
+    """A stable, out-of-place partition."""
+    results = ([], [])
+
+    for i in iterator:
+        results[int(predicate(i))].append(i)
+
+    # Returns trueList, falseList
+    return results[1], results[0]
+
+
+class bigtableCallTransformer(cst.CSTTransformer):
+    CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata')
+    METHOD_TO_PARAMS: Dict[str, Tuple[str]] = {
+        'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ),
+        'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ),
+        'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ),
+        'mutate_rows': ('table_name', 'entries', 'app_profile_id', ),
+        'ping_and_warm': ('name', 'app_profile_id', ),
+        'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ),
+        'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ),
+        'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ),
+        'sample_row_keys': ('table_name', 'app_profile_id', ),
+    }
+
+    def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode:
+        try:
+            key = original.func.attr.value
+            kword_params = self.METHOD_TO_PARAMS[key]
+        except (AttributeError, KeyError):
+            # Either not a method from the API or too convoluted to be sure.
+            return updated
+
+        # If the existing code is valid, keyword args come after positional args.
+        # Therefore, all positional args must map to the first parameters.
+        args, kwargs = partition(lambda a: not bool(a.keyword), updated.args)
+        if any(k.keyword.value == "request" for k in kwargs):
+            # We've already fixed this file, don't fix it again.
+            return updated
+
+        kwargs, ctrl_kwargs = partition(
+            lambda a: a.keyword.value not in self.CTRL_PARAMS,
+            kwargs
+        )
+
+        args, ctrl_args = args[:len(kword_params)], args[len(kword_params):]
+        ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl))
+                           for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS))
+
+        request_arg = cst.Arg(
+            value=cst.Dict([
+                cst.DictElement(
+                    cst.SimpleString("'{}'".format(name)),
+                    cst.Element(value=arg.value)
+                )
+                # Note: the args + kwargs looks silly, but keep in mind that
+                # the control parameters had to be stripped out, and that
+                # those could have been passed positionally or by keyword.
+                for name, arg in zip(kword_params, args + kwargs)]),
+            keyword=cst.Name("request")
+        )
+
+        return updated.with_changes(
+            args=[request_arg] + ctrl_kwargs
+        )
+
+
+def fix_files(
+    in_dir: pathlib.Path,
+    out_dir: pathlib.Path,
+    *,
+    transformer=bigtableCallTransformer(),
+):
+    """Duplicate the input dir to the output dir, fixing file method calls.
+
+    Preconditions:
+    * in_dir is a real directory
+    * out_dir is a real, empty directory
+    """
+    pyfile_gen = (
+        pathlib.Path(os.path.join(root, f))
+        for root, _, files in os.walk(in_dir)
+        for f in files if os.path.splitext(f)[1] == ".py"
+    )
+
+    for fpath in pyfile_gen:
+        with open(fpath, 'r') as f:
+            src = f.read()
+
+        # Parse the code and insert method call fixes.
+        tree = cst.parse_module(src)
+        updated = tree.visit(transformer)
+
+        # Create the path and directory structure for the new file.
+ updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the bigtable client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method. +""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/bigtable/v2/setup.py b/owl-bot-staging/bigtable/v2/setup.py new file mode 100644 index 000000000..66c85ceab --- /dev/null +++ b/owl-bot-staging/bigtable/v2/setup.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import io +import os + +import setuptools # type: ignore + +package_root = os.path.abspath(os.path.dirname(__file__)) + +name = 'google-cloud-bigtable' + + +description = "Google Cloud Bigtable API client library" + +version = {} +with open(os.path.join(package_root, 'google/cloud/bigtable/gapic_version.py')) as fp: + exec(fp.read(), version) +version = version["__version__"] + +if version[0] == "0": + release_status = "Development Status :: 4 - Beta" +else: + release_status = "Development Status :: 5 - Production/Stable" + +dependencies = [ + "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", + "proto-plus >= 1.22.0, <2.0.0dev", + "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", + "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", +] +url = "https://github.com/googleapis/python-bigtable" + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: + readme = readme_file.read() + +packages = [ + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") +] + +namespaces = ["google", "google.cloud"] + +setuptools.setup( + name=name, + version=version, + description=description, + long_description=readme, + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url=url, + classifiers=[ + release_status, + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Operating System :: OS Independent", + "Topic :: Internet", + ], + platforms="Posix; MacOS X; Windows", + packages=packages, + python_requires=">=3.7", + namespace_packages=namespaces, + install_requires=dependencies, + include_package_data=True, + zip_safe=False, +) diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.10.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.10.txt new file mode 100644 index 000000000..ed7f9aed2 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/testing/constraints-3.10.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.11.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.11.txt new file mode 100644 index 000000000..ed7f9aed2 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/testing/constraints-3.11.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.12.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.12.txt new file mode 100644 index 000000000..ed7f9aed2 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/testing/constraints-3.12.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. 
+google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.7.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.7.txt new file mode 100644 index 000000000..6c44adfea --- /dev/null +++ b/owl-bot-staging/bigtable/v2/testing/constraints-3.7.txt @@ -0,0 +1,9 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List all library dependencies and extras in this file. +# Pin the version to the lower bound. +# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", +# Then this file should have google-cloud-foo==1.14.0 +google-api-core==1.34.0 +proto-plus==1.22.0 +protobuf==3.19.5 diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.8.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.8.txt new file mode 100644 index 000000000..ed7f9aed2 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/testing/constraints-3.8.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.9.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.9.txt new file mode 100644 index 000000000..ed7f9aed2 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/testing/constraints-3.9.txt @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf diff --git a/owl-bot-staging/bigtable/v2/tests/__init__.py b/owl-bot-staging/bigtable/v2/tests/__init__.py new file mode 100644 index 000000000..1b4db446e --- /dev/null +++ b/owl-bot-staging/bigtable/v2/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/bigtable/v2/tests/unit/__init__.py b/owl-bot-staging/bigtable/v2/tests/unit/__init__.py new file mode 100644 index 000000000..1b4db446e --- /dev/null +++ b/owl-bot-staging/bigtable/v2/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# diff --git a/owl-bot-staging/bigtable/v2/tests/unit/gapic/__init__.py b/owl-bot-staging/bigtable/v2/tests/unit/gapic/__init__.py new file mode 100644 index 000000000..1b4db446e --- /dev/null +++ b/owl-bot-staging/bigtable/v2/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/__init__.py b/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/__init__.py new file mode 100644 index 000000000..1b4db446e --- /dev/null +++ b/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/test_bigtable.py b/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/test_bigtable.py new file mode 100644 index 000000000..7d90eb7a9 --- /dev/null +++ b/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -0,0 +1,5644 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient +from google.cloud.bigtable_v2.services.bigtable import BigtableClient +from google.cloud.bigtable_v2.services.bigtable import transports +from google.cloud.bigtable_v2.types import bigtable +from google.cloud.bigtable_v2.types import data +from google.cloud.bigtable_v2.types import request_stats +from google.oauth2 import service_account +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. 
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BigtableClient._get_default_mtls_endpoint(None) is None + assert BigtableClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert BigtableClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert BigtableClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert BigtableClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert BigtableClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (BigtableClient, "grpc"), + (BigtableAsyncClient, "grpc_asyncio"), + (BigtableClient, "rest"), +]) +def test_bigtable_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'bigtable.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://bigtable.googleapis.com' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.BigtableGrpcTransport, "grpc"), + (transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.BigtableRestTransport, "rest"), +]) +def test_bigtable_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (BigtableClient, "grpc"), + (BigtableAsyncClient, "grpc_asyncio"), + (BigtableClient, "rest"), +]) +def test_bigtable_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'bigtable.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 
'https://bigtable.googleapis.com' + ) + + +def test_bigtable_client_get_transport_class(): + transport = BigtableClient.get_transport_class() + available_transports = [ + transports.BigtableGrpcTransport, + transports.BigtableRestTransport, + ] + assert transport in available_transports + + transport = BigtableClient.get_transport_class("grpc") + assert transport == transports.BigtableGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + (BigtableClient, transports.BigtableRestTransport, "rest"), +]) +@mock.patch.object(BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient)) +@mock.patch.object(BigtableAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableAsyncClient)) +def test_bigtable_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(BigtableClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BigtableClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. 
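+    # (Supported values for GOOGLE_API_USE_MTLS_ENDPOINT are "never", "auto",
+    # and "always"; any other value is expected to raise MutualTLSChannelError,
+    # as asserted below.)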
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions(api_audience="https://language.googleapis.com") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com" + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc", "true"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (BigtableClient, transports.BigtableGrpcTransport, "grpc", "false"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio", "false"), + (BigtableClient, transports.BigtableRestTransport, "rest", "true"), + (BigtableClient, transports.BigtableRestTransport, "rest", "false"), +]) +@mock.patch.object(BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient)) +@mock.patch.object(BigtableAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_bigtable_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
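+    # Expected resolution, inferred from the assertions in this test:
+    #   GOOGLE_API_USE_CLIENT_CERTIFICATE == "true" and a client cert exists
+    #       -> host == DEFAULT_MTLS_ENDPOINT and the cert source is passed on
+    #   GOOGLE_API_USE_CLIENT_CERTIFICATE == "false", or no cert exists
+    #       -> host == DEFAULT_ENDPOINT and no cert source is used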
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ + BigtableClient, BigtableAsyncClient +]) +@mock.patch.object(BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient)) +@mock.patch.object(BigtableAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableAsyncClient)) +def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". 
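+    # get_mtls_endpoint_and_cert_source() returns an (api_endpoint,
+    # cert_source) pair; the cases below walk through each relevant
+    # combination of the two environment variables.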
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), + (BigtableClient, transports.BigtableRestTransport, "rest"), +]) +def test_bigtable_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. 
+ options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc", grpc_helpers), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), + (BigtableClient, transports.BigtableRestTransport, "rest", None), +]) +def test_bigtable_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +def test_bigtable_client_client_options_from_dict(): + with mock.patch('google.cloud.bigtable_v2.services.bigtable.transports.BigtableGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = BigtableClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc", grpc_helpers), + (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_bigtable_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
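+    # (Sketch of the flow under test: the credentials_file from client_options
+    # is loaded via google.auth.load_credentials_from_file, and the resulting
+    # credentials -- file_creds below -- are what get passed to create_channel.)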
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "bigtable.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.data', + 'https://www.googleapis.com/auth/bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-bigtable.data', + 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + scopes=None, + default_host="bigtable.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.ReadRowsRequest, + dict, +]) +def test_read_rows(request_type, transport: str = 'grpc'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_rows), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadRowsResponse()]) + response = client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadRowsRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.ReadRowsResponse) + + +def test_read_rows_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_rows), + '__call__') as call: + client.read_rows() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadRowsRequest() + +@pytest.mark.asyncio +async def test_read_rows_async(transport: str = 'grpc_asyncio', request_type=bigtable.ReadRowsRequest): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_rows), + '__call__') as call: + # Designate an appropriate return value for the call. 
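+        # Server-streaming methods are mocked as an aio.UnaryStreamCall whose
+        # read() coroutine returns one response per entry in side_effect.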
+ call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[bigtable.ReadRowsResponse()]) + response = await client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadRowsRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.ReadRowsResponse) + + +@pytest.mark.asyncio +async def test_read_rows_async_from_dict(): + await test_read_rows_async(request_type=dict) + +def test_read_rows_routing_parameters(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadRowsRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"}) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_rows), + '__call__') as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw['metadata'] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_rows), + '__call__') as call: + call.return_value = iter([bigtable.ReadRowsResponse()]) + client.read_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw['metadata'] + + +def test_read_rows_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_rows), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadRowsResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.read_rows( + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + + +def test_read_rows_flattened_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.read_rows( + bigtable.ReadRowsRequest(), + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + +@pytest.mark.asyncio +async def test_read_rows_flattened_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_rows), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.ReadRowsResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.read_rows( + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_read_rows_flattened_error_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.read_rows( + bigtable.ReadRowsRequest(), + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.SampleRowKeysRequest, + dict, +]) +def test_sample_row_keys(request_type, transport: str = 'grpc'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.sample_row_keys), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + response = client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.SampleRowKeysRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.SampleRowKeysResponse) + + +def test_sample_row_keys_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.sample_row_keys), + '__call__') as call: + client.sample_row_keys() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.SampleRowKeysRequest() + +@pytest.mark.asyncio +async def test_sample_row_keys_async(transport: str = 'grpc_asyncio', request_type=bigtable.SampleRowKeysRequest): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.sample_row_keys), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[bigtable.SampleRowKeysResponse()]) + response = await client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.SampleRowKeysRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.SampleRowKeysResponse) + + +@pytest.mark.asyncio +async def test_sample_row_keys_async_from_dict(): + await test_sample_row_keys_async(request_type=dict) + +def test_sample_row_keys_routing_parameters(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.SampleRowKeysRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"}) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.sample_row_keys), + '__call__') as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw['metadata'] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.sample_row_keys), + '__call__') as call: + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + client.sample_row_keys(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw['metadata'] + + +def test_sample_row_keys_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.sample_row_keys), + '__call__') as call: + # Designate an appropriate return value for the call. 
+ call.return_value = iter([bigtable.SampleRowKeysResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.sample_row_keys( + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + + +def test_sample_row_keys_flattened_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + +@pytest.mark.asyncio +async def test_sample_row_keys_flattened_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.sample_row_keys), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.SampleRowKeysResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.sample_row_keys( + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_sample_row_keys_flattened_error_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.MutateRowRequest, + dict, +]) +def test_mutate_row(request_type, transport: str = 'grpc'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_row), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.MutateRowResponse( + ) + response = client.mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.MutateRowRequest() + + # Establish that the response is the type that we expect. 
+    assert isinstance(response, bigtable.MutateRowResponse)
+
+
+def test_mutate_row_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_row),
+            '__call__') as call:
+        client.mutate_row()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable.MutateRowRequest()
+
+@pytest.mark.asyncio
+async def test_mutate_row_async(transport: str = 'grpc_asyncio', request_type=bigtable.MutateRowRequest):
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_row),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.MutateRowResponse())
+        response = await client.mutate_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable.MutateRowRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable.MutateRowResponse)
+
+
+@pytest.mark.asyncio
+async def test_mutate_row_async_from_dict():
+    await test_mutate_row_async(request_type=dict)
+
+def test_mutate_row_routing_parameters():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable.MutateRowRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"})
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_row),
+            '__call__') as call:
+        call.return_value = bigtable.MutateRowResponse()
+        client.mutate_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    _, _, kw = call.mock_calls[0]
+    # This test doesn't assert anything useful.
+    assert kw['metadata']
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"})
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.mutate_row),
+            '__call__') as call:
+        call.return_value = bigtable.MutateRowResponse()
+        client.mutate_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    _, _, kw = call.mock_calls[0]
+    # This test doesn't assert anything useful.
+ assert kw['metadata'] + + +def test_mutate_row_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_row), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.MutateRowResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_row( + table_name='table_name_value', + row_key=b'row_key_blob', + mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].row_key + mock_val = b'row_key_blob' + assert arg == mock_val + arg = args[0].mutations + mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + + +def test_mutate_row_flattened_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_row( + bigtable.MutateRowRequest(), + table_name='table_name_value', + row_key=b'row_key_blob', + mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + +@pytest.mark.asyncio +async def test_mutate_row_flattened_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_row), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.MutateRowResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.MutateRowResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.mutate_row( + table_name='table_name_value', + row_key=b'row_key_blob', + mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].row_key + mock_val = b'row_key_blob' + assert arg == mock_val + arg = args[0].mutations + mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_mutate_row_flattened_error_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + await client.mutate_row( + bigtable.MutateRowRequest(), + table_name='table_name_value', + row_key=b'row_key_blob', + mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.MutateRowsRequest, + dict, +]) +def test_mutate_rows(request_type, transport: str = 'grpc'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_rows), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.MutateRowsResponse()]) + response = client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.MutateRowsRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.MutateRowsResponse) + + +def test_mutate_rows_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_rows), + '__call__') as call: + client.mutate_rows() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.MutateRowsRequest() + +@pytest.mark.asyncio +async def test_mutate_rows_async(transport: str = 'grpc_asyncio', request_type=bigtable.MutateRowsRequest): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_rows), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[bigtable.MutateRowsResponse()]) + response = await client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.MutateRowsRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.MutateRowsResponse) + + +@pytest.mark.asyncio +async def test_mutate_rows_async_from_dict(): + await test_mutate_rows_async(request_type=dict) + +def test_mutate_rows_routing_parameters(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
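+    # (For context: GAPIC clients mirror routing fields such as table_name
+    # into the x-goog-request-params request header; the metadata assertion
+    # below only verifies that some header metadata was attached.)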
+ request = bigtable.MutateRowsRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"}) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_rows), + '__call__') as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw['metadata'] + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_rows), + '__call__') as call: + call.return_value = iter([bigtable.MutateRowsResponse()]) + client.mutate_rows(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + _, _, kw = call.mock_calls[0] + # This test doesn't assert anything useful. + assert kw['metadata'] + + +def test_mutate_rows_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_rows), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.MutateRowsResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.mutate_rows( + table_name='table_name_value', + entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].entries + mock_val = [bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + + +def test_mutate_rows_flattened_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_rows( + bigtable.MutateRowsRequest(), + table_name='table_name_value', + entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], + app_profile_id='app_profile_id_value', + ) + +@pytest.mark.asyncio +async def test_mutate_rows_flattened_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.mutate_rows), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.MutateRowsResponse()]) + + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+ response = await client.mutate_rows( + table_name='table_name_value', + entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].entries + mock_val = [bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_mutate_rows_flattened_error_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.mutate_rows( + bigtable.MutateRowsRequest(), + table_name='table_name_value', + entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], + app_profile_id='app_profile_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.CheckAndMutateRowRequest, + dict, +]) +def test_check_and_mutate_row(request_type, transport: str = 'grpc'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + response = client.check_and_mutate_row(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.CheckAndMutateRowRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.CheckAndMutateRowResponse) + assert response.predicate_matched is True + + +def test_check_and_mutate_row_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), + '__call__') as call: + client.check_and_mutate_row() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.CheckAndMutateRowRequest() + +@pytest.mark.asyncio +async def test_check_and_mutate_row_async(transport: str = 'grpc_asyncio', request_type=bigtable.CheckAndMutateRowRequest): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
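+    # (Unary methods on the async client are mocked with
+    # grpc_helpers_async.FakeUnaryUnaryCall, an awaitable fake that resolves
+    # to the response message it wraps.)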
+    with mock.patch.object(
+            type(client.transport.check_and_mutate_row),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.CheckAndMutateRowResponse(
+            predicate_matched=True,
+        ))
+        response = await client.check_and_mutate_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable.CheckAndMutateRowRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable.CheckAndMutateRowResponse)
+    assert response.predicate_matched is True
+
+
+@pytest.mark.asyncio
+async def test_check_and_mutate_row_async_from_dict():
+    await test_check_and_mutate_row_async(request_type=dict)
+
+def test_check_and_mutate_row_routing_parameters():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable.CheckAndMutateRowRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"})
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.check_and_mutate_row),
+            '__call__') as call:
+        call.return_value = bigtable.CheckAndMutateRowResponse()
+        client.check_and_mutate_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    _, _, kw = call.mock_calls[0]
+    # This test doesn't assert anything useful.
+    assert kw['metadata']
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"})
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.check_and_mutate_row),
+            '__call__') as call:
+        call.return_value = bigtable.CheckAndMutateRowResponse()
+        client.check_and_mutate_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    _, _, kw = call.mock_calls[0]
+    # This test doesn't assert anything useful.
+    assert kw['metadata']
+
+
+def test_check_and_mutate_row_flattened():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.check_and_mutate_row),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable.CheckAndMutateRowResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+ client.check_and_mutate_row( + table_name='table_name_value', + row_key=b'row_key_blob', + predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), + true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].row_key + mock_val = b'row_key_blob' + assert arg == mock_val + arg = args[0].predicate_filter + mock_val = data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])) + assert arg == mock_val + arg = args[0].true_mutations + mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] + assert arg == mock_val + arg = args[0].false_mutations + mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + + +def test_check_and_mutate_row_flattened_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), + table_name='table_name_value', + row_key=b'row_key_blob', + predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), + true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + +@pytest.mark.asyncio +async def test_check_and_mutate_row_flattened_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_and_mutate_row), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.CheckAndMutateRowResponse() + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.CheckAndMutateRowResponse()) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.check_and_mutate_row( + table_name='table_name_value', + row_key=b'row_key_blob', + predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), + true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].table_name + mock_val = 'table_name_value' + assert arg == mock_val + arg = args[0].row_key + mock_val = b'row_key_blob' + assert arg == mock_val + arg = args[0].predicate_filter + mock_val = data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])) + assert arg == mock_val + arg = args[0].true_mutations + mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] + assert arg == mock_val + arg = args[0].false_mutations + mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_check_and_mutate_row_flattened_error_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), + table_name='table_name_value', + row_key=b'row_key_blob', + predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), + true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.PingAndWarmRequest, + dict, +]) +def test_ping_and_warm(request_type, transport: str = 'grpc'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.ping_and_warm), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable.PingAndWarmResponse( + ) + response = client.ping_and_warm(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.PingAndWarmRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PingAndWarmResponse) + + +def test_ping_and_warm_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.ping_and_warm),
+            '__call__') as call:
+        client.ping_and_warm()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable.PingAndWarmRequest()
+
+@pytest.mark.asyncio
+async def test_ping_and_warm_async(transport: str = 'grpc_asyncio', request_type=bigtable.PingAndWarmRequest):
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.ping_and_warm),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.PingAndWarmResponse(
+        ))
+        response = await client.ping_and_warm(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable.PingAndWarmRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable.PingAndWarmResponse)
+
+
+@pytest.mark.asyncio
+async def test_ping_and_warm_async_from_dict():
+    await test_ping_and_warm_async(request_type=dict)
+
+def test_ping_and_warm_routing_parameters():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable.PingAndWarmRequest(**{"name": "projects/sample1/instances/sample2"})
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.ping_and_warm),
+            '__call__') as call:
+        call.return_value = bigtable.PingAndWarmResponse()
+        client.ping_and_warm(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        _, _, kw = call.mock_calls[0]
+        # This test doesn't assert anything useful; it only checks that
+        # routing metadata was attached to the call.
+        assert kw['metadata']
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"})
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.ping_and_warm),
+            '__call__') as call:
+        call.return_value = bigtable.PingAndWarmResponse()
+        client.ping_and_warm(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        _, _, kw = call.mock_calls[0]
+        # This test doesn't assert anything useful; it only checks that
+        # routing metadata was attached to the call.
+        assert kw['metadata']
+
+
+def test_ping_and_warm_flattened():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.ping_and_warm),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable.PingAndWarmResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.ping_and_warm(
+            name='name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = 'app_profile_id_value'
+        assert arg == mock_val
+
+
+def test_ping_and_warm_flattened_error():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.ping_and_warm(
+            bigtable.PingAndWarmRequest(),
+            name='name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+@pytest.mark.asyncio
+async def test_ping_and_warm_flattened_async():
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.ping_and_warm),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.PingAndWarmResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.ping_and_warm(
+            name='name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = 'app_profile_id_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_ping_and_warm_flattened_error_async():
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.ping_and_warm(
+            bigtable.PingAndWarmRequest(),
+            name='name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable.ReadModifyWriteRowRequest,
+  dict,
+])
+def test_read_modify_write_row(request_type, transport: str = 'grpc'):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_modify_write_row),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable.ReadModifyWriteRowResponse(
+        )
+        response = client.read_modify_write_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable.ReadModifyWriteRowRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable.ReadModifyWriteRowResponse)
+
+
+def test_read_modify_write_row_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_modify_write_row),
+            '__call__') as call:
+        client.read_modify_write_row()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable.ReadModifyWriteRowRequest()
+
+@pytest.mark.asyncio
+async def test_read_modify_write_row_async(transport: str = 'grpc_asyncio', request_type=bigtable.ReadModifyWriteRowRequest):
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_modify_write_row),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.ReadModifyWriteRowResponse(
+        ))
+        response = await client.read_modify_write_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable.ReadModifyWriteRowRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable.ReadModifyWriteRowResponse)
+
+
+@pytest.mark.asyncio
+async def test_read_modify_write_row_async_from_dict():
+    await test_read_modify_write_row_async(request_type=dict)
+
+def test_read_modify_write_row_routing_parameters():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable.ReadModifyWriteRowRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"})
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_modify_write_row),
+            '__call__') as call:
+        call.return_value = bigtable.ReadModifyWriteRowResponse()
+        client.read_modify_write_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        _, _, kw = call.mock_calls[0]
+        # This test doesn't assert anything useful; it only checks that
+        # routing metadata was attached to the call.
+        assert kw['metadata']
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable.ReadModifyWriteRowRequest(**{"app_profile_id": "sample1"})
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_modify_write_row),
+            '__call__') as call:
+        call.return_value = bigtable.ReadModifyWriteRowResponse()
+        client.read_modify_write_row(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+        _, _, kw = call.mock_calls[0]
+        # This test doesn't assert anything useful; it only checks that
+        # routing metadata was attached to the call.
+        assert kw['metadata']
+
+
+def test_read_modify_write_row_flattened():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_modify_write_row),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable.ReadModifyWriteRowResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.read_modify_write_row(
+            table_name='table_name_value',
+            row_key=b'row_key_blob',
+            rules=[data.ReadModifyWriteRule(family_name='family_name_value')],
+            app_profile_id='app_profile_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].table_name
+        mock_val = 'table_name_value'
+        assert arg == mock_val
+        arg = args[0].row_key
+        mock_val = b'row_key_blob'
+        assert arg == mock_val
+        arg = args[0].rules
+        mock_val = [data.ReadModifyWriteRule(family_name='family_name_value')]
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = 'app_profile_id_value'
+        assert arg == mock_val
+
+
+def test_read_modify_write_row_flattened_error():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.read_modify_write_row(
+            bigtable.ReadModifyWriteRowRequest(),
+            table_name='table_name_value',
+            row_key=b'row_key_blob',
+            rules=[data.ReadModifyWriteRule(family_name='family_name_value')],
+            app_profile_id='app_profile_id_value',
+        )
+
+@pytest.mark.asyncio
+async def test_read_modify_write_row_flattened_async():
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_modify_write_row),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.ReadModifyWriteRowResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.read_modify_write_row(
+            table_name='table_name_value',
+            row_key=b'row_key_blob',
+            rules=[data.ReadModifyWriteRule(family_name='family_name_value')],
+            app_profile_id='app_profile_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].table_name
+        mock_val = 'table_name_value'
+        assert arg == mock_val
+        arg = args[0].row_key
+        mock_val = b'row_key_blob'
+        assert arg == mock_val
+        arg = args[0].rules
+        mock_val = [data.ReadModifyWriteRule(family_name='family_name_value')]
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = 'app_profile_id_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_read_modify_write_row_flattened_error_async():
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.read_modify_write_row( + bigtable.ReadModifyWriteRowRequest(), + table_name='table_name_value', + row_key=b'row_key_blob', + rules=[data.ReadModifyWriteRule(family_name='family_name_value')], + app_profile_id='app_profile_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.GenerateInitialChangeStreamPartitionsRequest, + dict, +]) +def test_generate_initial_change_stream_partitions(request_type, transport: str = 'grpc'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.GenerateInitialChangeStreamPartitionsResponse()]) + response = client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() + + # Establish that the response is the type that we expect. + for message in response: + assert isinstance(message, bigtable.GenerateInitialChangeStreamPartitionsResponse) + + +def test_generate_initial_change_stream_partitions_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), + '__call__') as call: + client.generate_initial_change_stream_partitions() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() + +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_async(transport: str = 'grpc_asyncio', request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()]) + response = await client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() + + # Establish that the response is the type that we expect. 
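+    # (The async surface returns a UnaryStreamCall; messages are pulled one
+    # at a time with its read() coroutine.)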
+ message = await response.read() + assert isinstance(message, bigtable.GenerateInitialChangeStreamPartitionsResponse) + + +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_async_from_dict(): + await test_generate_initial_change_stream_partitions_async(request_type=dict) + + +def test_generate_initial_change_stream_partitions_field_headers(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + + request.table_name = 'table_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), + '__call__') as call: + call.return_value = iter([bigtable.GenerateInitialChangeStreamPartitionsResponse()]) + client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'table_name=table_name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_generate_initial_change_stream_partitions_field_headers_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.GenerateInitialChangeStreamPartitionsRequest() + + request.table_name = 'table_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), + '__call__') as call: + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()]) + await client.generate_initial_change_stream_partitions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'table_name=table_name_value', + ) in kw['metadata'] + + +def test_generate_initial_change_stream_partitions_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_initial_change_stream_partitions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iter([bigtable.GenerateInitialChangeStreamPartitionsResponse()]) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.generate_initial_change_stream_partitions( + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].table_name
+        mock_val = 'table_name_value'
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = 'app_profile_id_value'
+        assert arg == mock_val
+
+
+def test_generate_initial_change_stream_partitions_flattened_error():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.generate_initial_change_stream_partitions(
+            bigtable.GenerateInitialChangeStreamPartitionsRequest(),
+            table_name='table_name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+@pytest.mark.asyncio
+async def test_generate_initial_change_stream_partitions_flattened_async():
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.generate_initial_change_stream_partitions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.generate_initial_change_stream_partitions(
+            table_name='table_name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].table_name
+        mock_val = 'table_name_value'
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = 'app_profile_id_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_generate_initial_change_stream_partitions_flattened_error_async():
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.generate_initial_change_stream_partitions(
+            bigtable.GenerateInitialChangeStreamPartitionsRequest(),
+            table_name='table_name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable.ReadChangeStreamRequest,
+  dict,
+])
+def test_read_change_stream(request_type, transport: str = 'grpc'):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_change_stream),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iter([bigtable.ReadChangeStreamResponse()])
+        response = client.read_change_stream(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable.ReadChangeStreamRequest()
+
+    # Establish that the response is the type that we expect.
+ for message in response: + assert isinstance(message, bigtable.ReadChangeStreamResponse) + + +def test_read_change_stream_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), + '__call__') as call: + client.read_change_stream() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadChangeStreamRequest() + +@pytest.mark.asyncio +async def test_read_change_stream_async(transport: str = 'grpc_asyncio', request_type=bigtable.ReadChangeStreamRequest): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) + call.return_value.read = mock.AsyncMock(side_effect=[bigtable.ReadChangeStreamResponse()]) + response = await client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable.ReadChangeStreamRequest() + + # Establish that the response is the type that we expect. + message = await response.read() + assert isinstance(message, bigtable.ReadChangeStreamResponse) + + +@pytest.mark.asyncio +async def test_read_change_stream_async_from_dict(): + await test_read_change_stream_async(request_type=dict) + + +def test_read_change_stream_field_headers(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadChangeStreamRequest() + + request.table_name = 'table_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.read_change_stream), + '__call__') as call: + call.return_value = iter([bigtable.ReadChangeStreamResponse()]) + client.read_change_stream(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'table_name=table_name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_read_change_stream_field_headers_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable.ReadChangeStreamRequest() + + request.table_name = 'table_name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.read_change_stream),
+            '__call__') as call:
+        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+        call.return_value.read = mock.AsyncMock(side_effect=[bigtable.ReadChangeStreamResponse()])
+        await client.read_change_stream(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'table_name=table_name_value',
+    ) in kw['metadata']
+
+
+def test_read_change_stream_flattened():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_change_stream),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iter([bigtable.ReadChangeStreamResponse()])
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.read_change_stream(
+            table_name='table_name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].table_name
+        mock_val = 'table_name_value'
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = 'app_profile_id_value'
+        assert arg == mock_val
+
+
+def test_read_change_stream_flattened_error():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.read_change_stream(
+            bigtable.ReadChangeStreamRequest(),
+            table_name='table_name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+@pytest.mark.asyncio
+async def test_read_change_stream_flattened_async():
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.read_change_stream),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.read_change_stream(
+            table_name='table_name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].table_name
+        mock_val = 'table_name_value'
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = 'app_profile_id_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_read_change_stream_flattened_error_async():
+    client = BigtableAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.read_change_stream( + bigtable.ReadChangeStreamRequest(), + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.ReadRowsRequest, + dict, +]) +def test_read_rows_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadRowsResponse( + last_scanned_row_key=b'last_scanned_row_key_blob', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + with mock.patch.object(response_value, 'iter_content') as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.read_rows(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ReadRowsResponse) + assert response.last_scanned_row_key == b'last_scanned_row_key_blob' + + +def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).read_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = 'table_name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).read_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == 'table_name_value' + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadRowsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. 
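+        # (path_template.transcode is the google.api_core helper that maps a
+        # request onto the REST uri/method/body/query_params, so stubbing it
+        # keeps the test independent of the real http options.)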
+        with mock.patch.object(path_template, 'transcode') as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable.ReadRowsResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+            json_return_value = "[{}]".format(json_return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            with mock.patch.object(response_value, 'iter_content') as iter_content:
+                iter_content.return_value = iter(json_return_value)
+                response = client.read_rows(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_read_rows_rest_unset_required_fields():
+    transport = transports.BigtableRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.read_rows._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("tableName", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_read_rows_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
+    )
+    client = BigtableClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableRestInterceptor, "post_read_rows") as post, \
+        mock.patch.object(transports.BigtableRestInterceptor, "pre_read_rows") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable.ReadRowsResponse.to_json(bigtable.ReadRowsResponse())
+        req.return_value._content = "[{}]".format(req.return_value._content)
+
+        request = bigtable.ReadRowsRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.ReadRowsResponse()
+
+        client.read_rows(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_read_rows_rest_bad_request(transport: str = 'rest', request_type=bigtable.ReadRowsRequest):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
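+    # (google.api_core maps an HTTP 400 response to core_exceptions.BadRequest.)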
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.read_rows(request) + + +def test_read_rows_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadRowsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + with mock.patch.object(response_value, 'iter_content') as iter_content: + iter_content.return_value = iter(json_return_value) + client.read_rows(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:readRows" % client.transport._host, args[1]) + + +def test_read_rows_rest_flattened_error(transport: str = 'rest'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_rows( + bigtable.ReadRowsRequest(), + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + +def test_read_rows_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.SampleRowKeysRequest, + dict, +]) +def test_sample_row_keys_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
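+        # (SampleRowKeys is a server-streaming RPC, so the REST transport
+        # expects the payload as a JSON array of responses; see the
+        # "[{}]".format(...) wrapping below.)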
+ return_value = bigtable.SampleRowKeysResponse( + row_key=b'row_key_blob', + offset_bytes=1293, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + with mock.patch.object(response_value, 'iter_content') as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.sample_row_keys(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.SampleRowKeysResponse) + assert response.row_key == b'row_key_blob' + assert response.offset_bytes == 1293 + + +def test_sample_row_keys_rest_required_fields(request_type=bigtable.SampleRowKeysRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).sample_row_keys._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = 'table_name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).sample_row_keys._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("app_profile_id", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == 'table_name_value' + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.SampleRowKeysResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
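+            # ('v1/sample_method' below is a placeholder uri; with transcode
+            # mocked, the literal route does not affect what is verified.)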
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable.SampleRowKeysResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+            json_return_value = "[{}]".format(json_return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            with mock.patch.object(response_value, 'iter_content') as iter_content:
+                iter_content.return_value = iter(json_return_value)
+                response = client.sample_row_keys(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_sample_row_keys_rest_unset_required_fields():
+    transport = transports.BigtableRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.sample_row_keys._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("appProfileId", )) & set(("tableName", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_sample_row_keys_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
+    )
+    client = BigtableClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableRestInterceptor, "post_sample_row_keys") as post, \
+        mock.patch.object(transports.BigtableRestInterceptor, "pre_sample_row_keys") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable.SampleRowKeysResponse.to_json(bigtable.SampleRowKeysResponse())
+        req.return_value._content = "[{}]".format(req.return_value._content)
+
+        request = bigtable.SampleRowKeysRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.SampleRowKeysResponse()
+
+        client.sample_row_keys(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_sample_row_keys_rest_bad_request(transport: str = 'rest', request_type=bigtable.SampleRowKeysRequest):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.sample_row_keys(request) + + +def test_sample_row_keys_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.SampleRowKeysResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + with mock.patch.object(response_value, 'iter_content') as iter_content: + iter_content.return_value = iter(json_return_value) + client.sample_row_keys(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" % client.transport._host, args[1]) + + +def test_sample_row_keys_rest_flattened_error(transport: str = 'rest'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.sample_row_keys( + bigtable.SampleRowKeysRequest(), + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + +def test_sample_row_keys_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.MutateRowRequest, + dict, +]) +def test_mutate_row_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
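+        # (MutateRow is a unary RPC, so the REST response is a single JSON
+        # message rather than a streamed JSON array.)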
+ return_value = bigtable.MutateRowResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.mutate_row(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.MutateRowResponse) + + +def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request_init["row_key"] = b'' + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = 'table_name_value' + jsonified_request["rowKey"] = b'row_key_blob' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == 'table_name_value' + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b'row_key_blob' + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable.MutateRowResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.mutate_row(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_mutate_row_rest_unset_required_fields():
+    transport = transports.BigtableRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.mutate_row._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("tableName", "rowKey", "mutations", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_mutate_row_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
+    )
+    client = BigtableClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableRestInterceptor, "post_mutate_row") as post, \
+        mock.patch.object(transports.BigtableRestInterceptor, "pre_mutate_row") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable.MutateRowResponse.to_json(bigtable.MutateRowResponse())
+
+        request = bigtable.MutateRowRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.MutateRowResponse()
+
+        client.mutate_row(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_mutate_row_rest_bad_request(transport: str = 'rest', request_type=bigtable.MutateRowRequest):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.mutate_row(request)
+
+
+def test_mutate_row_rest_flattened():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + table_name='table_name_value', + row_key=b'row_key_blob', + mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.mutate_row(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" % client.transport._host, args[1]) + + +def test_mutate_row_rest_flattened_error(transport: str = 'rest'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_row( + bigtable.MutateRowRequest(), + table_name='table_name_value', + row_key=b'row_key_blob', + mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + + +def test_mutate_row_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.MutateRowsRequest, + dict, +]) +def test_mutate_rows_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowsResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + with mock.patch.object(response_value, 'iter_content') as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.mutate_rows(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable.MutateRowsResponse) + + +def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).mutate_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = 'table_name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).mutate_rows._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == 'table_name_value' + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
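+            # ('v1/sample_method' is only a placeholder URI; the assertions
+            # below care solely about the '$alt' query parameter that the
+            # REST transport appends.)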
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable.MutateRowsResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+            json_return_value = "[{}]".format(json_return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            with mock.patch.object(response_value, 'iter_content') as iter_content:
+                iter_content.return_value = iter(json_return_value)
+                response = client.mutate_rows(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_mutate_rows_rest_unset_required_fields():
+    transport = transports.BigtableRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.mutate_rows._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("tableName", "entries", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_mutate_rows_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
+    )
+    client = BigtableClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableRestInterceptor, "post_mutate_rows") as post, \
+        mock.patch.object(transports.BigtableRestInterceptor, "pre_mutate_rows") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable.MutateRowsRequest.pb(bigtable.MutateRowsRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable.MutateRowsResponse.to_json(bigtable.MutateRowsResponse())
+        req.return_value._content = "[{}]".format(req.return_value._content)
+
+        request = bigtable.MutateRowsRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.MutateRowsResponse()
+
+        client.mutate_rows(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_mutate_rows_rest_bad_request(transport: str = 'rest', request_type=bigtable.MutateRowsRequest):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
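+    # (api_core translates the mocked 400 response into
+    # core_exceptions.BadRequest, which pytest.raises then catches.)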
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.mutate_rows(request) + + +def test_mutate_rows_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.MutateRowsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + table_name='table_name_value', + entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], + app_profile_id='app_profile_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + with mock.patch.object(response_value, 'iter_content') as iter_content: + iter_content.return_value = iter(json_return_value) + client.mutate_rows(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" % client.transport._host, args[1]) + + +def test_mutate_rows_rest_flattened_error(transport: str = 'rest'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.mutate_rows( + bigtable.MutateRowsRequest(), + table_name='table_name_value', + entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], + app_profile_id='app_profile_id_value', + ) + + +def test_mutate_rows_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.CheckAndMutateRowRequest, + dict, +]) +def test_check_and_mutate_row_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
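+        # predicate_matched is the one scalar field set here; the test
+        # round-trips it through JSON and asserts on it below.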
+ return_value = bigtable.CheckAndMutateRowResponse( + predicate_matched=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.check_and_mutate_row(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.CheckAndMutateRowResponse) + assert response.predicate_matched is True + + +def test_check_and_mutate_row_rest_required_fields(request_type=bigtable.CheckAndMutateRowRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request_init["row_key"] = b'' + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).check_and_mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = 'table_name_value' + jsonified_request["rowKey"] = b'row_key_blob' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).check_and_mutate_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == 'table_name_value' + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b'row_key_blob' + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.CheckAndMutateRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable.CheckAndMutateRowResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.check_and_mutate_row(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_check_and_mutate_row_rest_unset_required_fields():
+    transport = transports.BigtableRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.check_and_mutate_row._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("tableName", "rowKey", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_check_and_mutate_row_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
+    )
+    client = BigtableClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableRestInterceptor, "post_check_and_mutate_row") as post, \
+        mock.patch.object(transports.BigtableRestInterceptor, "pre_check_and_mutate_row") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable.CheckAndMutateRowRequest.pb(bigtable.CheckAndMutateRowRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable.CheckAndMutateRowResponse.to_json(bigtable.CheckAndMutateRowResponse())
+
+        request = bigtable.CheckAndMutateRowRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.CheckAndMutateRowResponse()
+
+        client.check_and_mutate_row(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_check_and_mutate_row_rest_bad_request(transport: str = 'rest', request_type=bigtable.CheckAndMutateRowRequest):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.check_and_mutate_row(request) + + +def test_check_and_mutate_row_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.CheckAndMutateRowResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + table_name='table_name_value', + row_key=b'row_key_blob', + predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), + true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.check_and_mutate_row(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" % client.transport._host, args[1]) + + +def test_check_and_mutate_row_rest_flattened_error(transport: str = 'rest'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
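+    # No HTTP session is mocked here: the ValueError is raised client-side,
+    # before any request is attempted.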
+ with pytest.raises(ValueError): + client.check_and_mutate_row( + bigtable.CheckAndMutateRowRequest(), + table_name='table_name_value', + row_key=b'row_key_blob', + predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), + true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], + app_profile_id='app_profile_id_value', + ) + + +def test_check_and_mutate_row_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.PingAndWarmRequest, + dict, +]) +def test_ping_and_warm_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.PingAndWarmResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.ping_and_warm(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.PingAndWarmResponse) + + +def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).ping_and_warm._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).ping_and_warm._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.PingAndWarmResponse() + # Mock the http request call within the method and fake a response. 
+    with mock.patch.object(Session, 'request') as req:
+        # We need to mock transcode() because providing default values
+        # for required fields will fail the real version if the http_options
+        # expect actual values for those fields.
+        with mock.patch.object(path_template, 'transcode') as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable.PingAndWarmResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.ping_and_warm(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_ping_and_warm_rest_unset_required_fields():
+    transport = transports.BigtableRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.ping_and_warm._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_ping_and_warm_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
+    )
+    client = BigtableClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableRestInterceptor, "post_ping_and_warm") as post, \
+        mock.patch.object(transports.BigtableRestInterceptor, "pre_ping_and_warm") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable.PingAndWarmResponse.to_json(bigtable.PingAndWarmResponse())
+
+        request = bigtable.PingAndWarmRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.PingAndWarmResponse()
+
+        client.ping_and_warm(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_ping_and_warm_rest_bad_request(transport: str = 'rest', request_type=bigtable.PingAndWarmRequest):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.ping_and_warm(request) + + +def test_ping_and_warm_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.PingAndWarmResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + app_profile_id='app_profile_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.ping_and_warm(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*}:ping" % client.transport._host, args[1]) + + +def test_ping_and_warm_rest_flattened_error(transport: str = 'rest'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.ping_and_warm( + bigtable.PingAndWarmRequest(), + name='name_value', + app_profile_id='app_profile_id_value', + ) + + +def test_ping_and_warm_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.ReadModifyWriteRowRequest, + dict, +]) +def test_read_modify_write_row_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadModifyWriteRowResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.read_modify_write_row(request) + + # Establish that the response is the type that we expect. 
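+    # ReadModifyWriteRow is unary over REST, so the JSON body parses directly
+    # into a single response message (no list wrapping or iter_content here).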
+ assert isinstance(response, bigtable.ReadModifyWriteRowResponse) + + +def test_read_modify_write_row_rest_required_fields(request_type=bigtable.ReadModifyWriteRowRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request_init["row_key"] = b'' + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).read_modify_write_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = 'table_name_value' + jsonified_request["rowKey"] = b'row_key_blob' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).read_modify_write_row._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == 'table_name_value' + assert "rowKey" in jsonified_request + assert jsonified_request["rowKey"] == b'row_key_blob' + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadModifyWriteRowResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.read_modify_write_row(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_read_modify_write_row_rest_unset_required_fields():
+    transport = transports.BigtableRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.read_modify_write_row._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("tableName", "rowKey", "rules", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_read_modify_write_row_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
+    )
+    client = BigtableClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableRestInterceptor, "post_read_modify_write_row") as post, \
+        mock.patch.object(transports.BigtableRestInterceptor, "pre_read_modify_write_row") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable.ReadModifyWriteRowRequest.pb(bigtable.ReadModifyWriteRowRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable.ReadModifyWriteRowResponse.to_json(bigtable.ReadModifyWriteRowResponse())
+
+        request = bigtable.ReadModifyWriteRowRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.ReadModifyWriteRowResponse()
+
+        client.read_modify_write_row(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_read_modify_write_row_rest_bad_request(transport: str = 'rest', request_type=bigtable.ReadModifyWriteRowRequest):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.read_modify_write_row(request) + + +def test_read_modify_write_row_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadModifyWriteRowResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + table_name='table_name_value', + row_key=b'row_key_blob', + rules=[data.ReadModifyWriteRule(family_name='family_name_value')], + app_profile_id='app_profile_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.read_modify_write_row(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" % client.transport._host, args[1]) + + +def test_read_modify_write_row_rest_flattened_error(transport: str = 'rest'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_modify_write_row( + bigtable.ReadModifyWriteRowRequest(), + table_name='table_name_value', + row_key=b'row_key_blob', + rules=[data.ReadModifyWriteRule(family_name='family_name_value')], + app_profile_id='app_profile_id_value', + ) + + +def test_read_modify_write_row_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable.GenerateInitialChangeStreamPartitionsRequest, + dict, +]) +def test_generate_initial_change_stream_partitions_rest(request_type): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
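+        # This is a server-streaming method, so the JSON payload is wrapped
+        # in "[{}]" below and replayed through iter_content to emulate a
+        # streamed REST response.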
+ return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + with mock.patch.object(response_value, 'iter_content') as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.generate_initial_change_stream_partitions(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) + + +def test_generate_initial_change_stream_partitions_rest_required_fields(request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).generate_initial_change_stream_partitions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = 'table_name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).generate_initial_change_stream_partitions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == 'table_name_value' + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+            json_return_value = "[{}]".format(json_return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            with mock.patch.object(response_value, 'iter_content') as iter_content:
+                iter_content.return_value = iter(json_return_value)
+                response = client.generate_initial_change_stream_partitions(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_generate_initial_change_stream_partitions_rest_unset_required_fields():
+    transport = transports.BigtableRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.generate_initial_change_stream_partitions._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("tableName", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
+    )
+    client = BigtableClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableRestInterceptor, "post_generate_initial_change_stream_partitions") as post, \
+        mock.patch.object(transports.BigtableRestInterceptor, "pre_generate_initial_change_stream_partitions") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb(bigtable.GenerateInitialChangeStreamPartitionsRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json(bigtable.GenerateInitialChangeStreamPartitionsResponse())
+        req.return_value._content = "[{}]".format(req.return_value._content)
+
+        request = bigtable.GenerateInitialChangeStreamPartitionsRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse()
+
+        client.generate_initial_change_stream_partitions(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_generate_initial_change_stream_partitions_rest_bad_request(transport: str = 'rest', request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.generate_initial_change_stream_partitions(request)
+
+
+def test_generate_initial_change_stream_partitions_rest_flattened():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), 'request') as req:
+        # Designate an appropriate value for the returned response.
+        return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse()
+
+        # get arguments that satisfy an http rule for this method
+        sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+
+        # get truthy value for each flattened field
+        mock_args = dict(
+            table_name='table_name_value',
+            app_profile_id='app_profile_id_value',
+        )
+        mock_args.update(sample_request)
+
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 200
+        # Convert return value to protobuf type
+        return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(return_value)
+        json_return_value = json_format.MessageToJson(return_value)
+        json_return_value = "[{}]".format(json_return_value)
+        response_value._content = json_return_value.encode('UTF-8')
+        req.return_value = response_value
+
+        with mock.patch.object(response_value, 'iter_content') as iter_content:
+            iter_content.return_value = iter(json_return_value)
+            client.generate_initial_change_stream_partitions(**mock_args)
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(req.mock_calls) == 1
+        _, args, _ = req.mock_calls[0]
+        assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions" % client.transport._host, args[1])
+
+
+def test_generate_initial_change_stream_partitions_rest_flattened_error(transport: str = 'rest'):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.generate_initial_change_stream_partitions(
+            bigtable.GenerateInitialChangeStreamPartitionsRequest(),
+            table_name='table_name_value',
+            app_profile_id='app_profile_id_value',
+        )
+
+
+def test_generate_initial_change_stream_partitions_rest_error():
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='rest'
+    )
+
+
+@pytest.mark.parametrize("request_type", [
+    bigtable.ReadChangeStreamRequest,
+    dict,
+])
+def test_read_change_stream_rest(request_type):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadChangeStreamResponse( + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + json_return_value = "[{}]".format(json_return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + with mock.patch.object(response_value, 'iter_content') as iter_content: + iter_content.return_value = iter(json_return_value) + response = client.read_change_stream(request) + + assert isinstance(response, Iterable) + response = next(response) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable.ReadChangeStreamResponse) + + +def test_read_change_stream_rest_required_fields(request_type=bigtable.ReadChangeStreamRequest): + transport_class = transports.BigtableRestTransport + + request_init = {} + request_init["table_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).read_change_stream._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["tableName"] = 'table_name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).read_change_stream._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "tableName" in jsonified_request + assert jsonified_request["tableName"] == 'table_name_value' + + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadChangeStreamResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable.ReadChangeStreamResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+            json_return_value = "[{}]".format(json_return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            with mock.patch.object(response_value, 'iter_content') as iter_content:
+                iter_content.return_value = iter(json_return_value)
+                response = client.read_change_stream(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_read_change_stream_rest_unset_required_fields():
+    transport = transports.BigtableRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.read_change_stream._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("tableName", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_read_change_stream_rest_interceptors(null_interceptor):
+    transport = transports.BigtableRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableRestInterceptor(),
+    )
+    client = BigtableClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableRestInterceptor, "post_read_change_stream") as post, \
+        mock.patch.object(transports.BigtableRestInterceptor, "pre_read_change_stream") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable.ReadChangeStreamRequest.pb(bigtable.ReadChangeStreamRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable.ReadChangeStreamResponse.to_json(bigtable.ReadChangeStreamResponse())
+        req.return_value._content = "[{}]".format(req.return_value._content)
+
+        request = bigtable.ReadChangeStreamRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable.ReadChangeStreamResponse()
+
+        client.read_change_stream(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_read_change_stream_rest_bad_request(transport: str = 'rest', request_type=bigtable.ReadChangeStreamRequest):
+    client = BigtableClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.read_change_stream(request) + + +def test_read_change_stream_rest_flattened(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable.ReadChangeStreamResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + json_return_value = "[{}]".format(json_return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + with mock.patch.object(response_value, 'iter_content') as iter_content: + iter_content.return_value = iter(json_return_value) + client.read_change_stream(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream" % client.transport._host, args[1]) + + +def test_read_change_stream_rest_flattened_error(transport: str = 'rest'): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.read_change_stream( + bigtable.ReadChangeStreamRequest(), + table_name='table_name_value', + app_profile_id='app_profile_id_value', + ) + + +def test_read_change_stream_rest_error(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. 
+ transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. + options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.BigtableGrpcTransport, + transports.BigtableGrpcAsyncIOTransport, + transports.BigtableRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "rest", +]) +def test_transport_kind(transport_name): + transport = BigtableClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.BigtableGrpcTransport, + ) + +def test_bigtable_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.BigtableTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_bigtable_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.BigtableTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
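+    # The abstract base only declares the surface; the concrete gRPC and REST
+    # transports override each of these methods.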
+ methods = ( + 'read_rows', + 'sample_row_keys', + 'mutate_row', + 'mutate_rows', + 'check_and_mutate_row', + 'ping_and_warm', + 'read_modify_write_row', + 'generate_initial_change_stream_partitions', + 'read_change_stream', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_bigtable_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.data', + 'https://www.googleapis.com/auth/bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-bigtable.data', + 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + quota_project_id="octopus", + ) + + +def test_bigtable_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableTransport() + adc.assert_called_once() + + +def test_bigtable_auth_adc(): + # If no credentials are provided, we should use ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + BigtableClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.data', + 'https://www.googleapis.com/auth/bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-bigtable.data', + 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableGrpcTransport, + transports.BigtableGrpcAsyncIOTransport, + ], +) +def test_bigtable_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/bigtable.data', 'https://www.googleapis.com/auth/bigtable.data.readonly', 'https://www.googleapis.com/auth/cloud-bigtable.data', 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/cloud-platform.read-only',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableGrpcTransport, + transports.BigtableGrpcAsyncIOTransport, + transports.BigtableRestTransport, + ], +) +def test_bigtable_transport_auth_gdch_credentials(transport_class): + host = 'https://language.com' + api_audience_tests = [None, 'https://language2.com'] + api_audience_expect = [host, 'https://language2.com'] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with( + e + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableGrpcTransport, grpc_helpers), + (transports.BigtableGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_bigtable_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "bigtable.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.data', + 'https://www.googleapis.com/auth/bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-bigtable.data', + 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + scopes=["1", "2"], + default_host="bigtable.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport]) +def test_bigtable_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. 
+ with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
+ mock_ssl_channel_creds = mock.Mock()
+ transport_class(
+ host="squid.clam.whelk",
+ credentials=cred,
+ ssl_channel_credentials=mock_ssl_channel_creds
+ )
+ mock_create_channel.assert_called_once_with(
+ "squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_channel_creds,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+
+ # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
+ # is used.
+ with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
+ with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
+ transport_class(
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback
+ )
+ expected_cert, expected_key = client_cert_source_callback()
+ mock_ssl_cred.assert_called_once_with(
+ certificate_chain=expected_cert,
+ private_key=expected_key
+ )
+
+def test_bigtable_http_transport_client_cert_source_for_mtls():
+ cred = ga_credentials.AnonymousCredentials()
+ with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel:
+ transports.BigtableRestTransport (
+ credentials=cred,
+ client_cert_source_for_mtls=client_cert_source_callback
+ )
+ mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback)
+
+
+@pytest.mark.parametrize("transport_name", [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+])
+def test_bigtable_host_no_port(transport_name):
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='bigtable.googleapis.com'),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ 'bigtable.googleapis.com:443'
+ if transport_name in ['grpc', 'grpc_asyncio']
+ else 'https://bigtable.googleapis.com'
+ )
+
+@pytest.mark.parametrize("transport_name", [
+ "grpc",
+ "grpc_asyncio",
+ "rest",
+])
+def test_bigtable_host_with_port(transport_name):
+ client = BigtableClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ client_options=client_options.ClientOptions(api_endpoint='bigtable.googleapis.com:8000'),
+ transport=transport_name,
+ )
+ assert client.transport._host == (
+ 'bigtable.googleapis.com:8000'
+ if transport_name in ['grpc', 'grpc_asyncio']
+ else 'https://bigtable.googleapis.com:8000'
+ )
+
+@pytest.mark.parametrize("transport_name", [
+ "rest",
+])
+def test_bigtable_client_transport_session_collision(transport_name):
+ creds1 = ga_credentials.AnonymousCredentials()
+ creds2 = ga_credentials.AnonymousCredentials()
+ client1 = BigtableClient(
+ credentials=creds1,
+ transport=transport_name,
+ )
+ client2 = BigtableClient(
+ credentials=creds2,
+ transport=transport_name,
+ )
+ session1 = client1.transport.read_rows._session
+ session2 = client2.transport.read_rows._session
+ assert session1 != session2
+ session1 = client1.transport.sample_row_keys._session
+ session2 = client2.transport.sample_row_keys._session
+ assert session1 != session2
+ session1 = client1.transport.mutate_row._session
+ session2 = client2.transport.mutate_row._session
+ assert session1 != session2
+ session1 = client1.transport.mutate_rows._session
+ session2 = client2.transport.mutate_rows._session
+ assert session1 != session2
+ session1 = client1.transport.check_and_mutate_row._session
+ session2 = client2.transport.check_and_mutate_row._session
+ assert session1 != session2
+ session1 = client1.transport.ping_and_warm._session
+ session2 = client2.transport.ping_and_warm._session
+ assert session1 != session2
+ session1 = client1.transport.read_modify_write_row._session
+ session2 = client2.transport.read_modify_write_row._session
+ assert session1 != session2
+ session1 = client1.transport.generate_initial_change_stream_partitions._session
+ session2 = client2.transport.generate_initial_change_stream_partitions._session
+ assert session1 != session2
+ session1 = client1.transport.read_change_stream._session
+ session2 = client2.transport.read_change_stream._session
+ assert session1 != session2
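# The collision checks above reduce to one invariant: every REST client owns a
# private AuthorizedSession, so credentials attached to one client can never
# bleed into another. A minimal sketch of the same invariant outside the test
# harness follows; it touches the private `_session` attribute purely for
# illustration (assumption: `_session` is not a supported public API).
from google.auth import credentials as ga_credentials
from google.cloud.bigtable_v2 import BigtableClient

client_a = BigtableClient(credentials=ga_credentials.AnonymousCredentials(), transport="rest")
client_b = BigtableClient(credentials=ga_credentials.AnonymousCredentials(), transport="rest")
# Each wrapped RPC hangs its own requests session off its own transport.
assert client_a.transport.read_rows._session is not client_b.transport.read_rows._session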
+def test_bigtable_grpc_transport_channel():
+ channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.BigtableGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+def test_bigtable_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.BigtableGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials == None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport])
+def test_bigtable_transport_channel_mtls_with_client_cert_source(
+ transport_class
+):
+ with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred:
+ with mock.patch.object(transport_class, "create_channel") as grpc_create_channel:
+ mock_ssl_cred = mock.Mock()
+ grpc_ssl_channel_cred.return_value = mock_ssl_cred
+
+ mock_grpc_channel = mock.Mock()
+ grpc_create_channel.return_value = mock_grpc_channel
+
+ cred = ga_credentials.AnonymousCredentials()
+ with pytest.warns(DeprecationWarning):
+ with mock.patch.object(google.auth, 'default') as adc:
+ adc.return_value = (cred, None)
+ transport = transport_class(
+ host="squid.clam.whelk",
+ api_mtls_endpoint="mtls.squid.clam.whelk",
+ client_cert_source=client_cert_source_callback,
+ )
+ adc.assert_called_once()
+
+ grpc_ssl_channel_cred.assert_called_once_with(
+ certificate_chain=b"cert bytes", private_key=b"key bytes"
+ )
+ grpc_create_channel.assert_called_once_with(
+ "mtls.squid.clam.whelk:443",
+ credentials=cred,
+ credentials_file=None,
+ scopes=None,
+ ssl_credentials=mock_ssl_cred,
+ quota_project_id=None,
+ options=[
+ ("grpc.max_send_message_length", -1),
+ ("grpc.max_receive_message_length", -1),
+ ],
+ )
+ assert transport.grpc_channel == mock_grpc_channel
+ assert transport._ssl_channel_credentials == mock_ssl_cred
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport]) +def test_bigtable_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_instance_path(): + project = "squid" + instance = "clam" + expected = "projects/{project}/instances/{instance}".format(project=project, instance=instance, ) + actual = BigtableClient.instance_path(project, instance) + assert expected == actual + + +def test_parse_instance_path(): + expected = { + "project": "whelk", + "instance": "octopus", + } + path = BigtableClient.instance_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_instance_path(path) + assert expected == actual + +def test_table_path(): + project = "oyster" + instance = "nudibranch" + table = "cuttlefish" + expected = "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) + actual = BigtableClient.table_path(project, instance, table) + assert expected == actual + + +def test_parse_table_path(): + expected = { + "project": "mussel", + "instance": "winkle", + "table": "nautilus", + } + path = BigtableClient.table_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_table_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "scallop" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = BigtableClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "abalone", + } + path = BigtableClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "squid" + expected = "folders/{folder}".format(folder=folder, ) + actual = BigtableClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "clam", + } + path = BigtableClient.common_folder_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "whelk" + expected = "organizations/{organization}".format(organization=organization, ) + actual = BigtableClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "octopus", + } + path = BigtableClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "oyster" + expected = "projects/{project}".format(project=project, ) + actual = BigtableClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nudibranch", + } + path = BigtableClient.common_project_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "cuttlefish" + location = "mussel" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = BigtableClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "winkle", + "location": "nautilus", + } + path = BigtableClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.BigtableTransport, '_prep_wrapped_messages') as prep: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.BigtableTransport, '_prep_wrapped_messages') as prep: + transport_class = BigtableClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = BigtableAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + 'grpc', + ] + for transport in transports: + client = BigtableClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
+ with mock.patch.object(type(client.transport), "close") as close:
+ close.assert_not_called()
+ with client:
+ pass
+ close.assert_called()
+
+@pytest.mark.parametrize("client_class,transport_class", [
+ (BigtableClient, transports.BigtableGrpcTransport),
+ (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport),
+])
+def test_api_key_credentials(client_class, transport_class):
+ with mock.patch.object(
+ google.auth._default, "get_api_key_credentials", create=True
+ ) as get_api_key_credentials:
+ mock_cred = mock.Mock()
+ get_api_key_credentials.return_value = mock_cred
+ options = client_options.ClientOptions()
+ options.api_key = "api_key"
+ with mock.patch.object(transport_class, "__init__") as patched:
+ patched.return_value = None
+ client = client_class(client_options=options)
+ patched.assert_called_once_with(
+ credentials=mock_cred,
+ credentials_file=None,
+ host=client.DEFAULT_ENDPOINT,
+ scopes=None,
+ client_cert_source_for_mtls=None,
+ quota_project_id=None,
+ client_info=transports.base.DEFAULT_CLIENT_INFO,
+ always_use_jwt_access=True,
+ api_audience=None,
+ )
diff --git a/owl-bot-staging/bigtable_admin/v2/.coveragerc b/owl-bot-staging/bigtable_admin/v2/.coveragerc
new file mode 100644
index 000000000..2fb79c4ed
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/.coveragerc
@@ -0,0 +1,13 @@
+[run]
+branch = True
+
+[report]
+show_missing = True
+omit =
+ google/cloud/bigtable_admin/__init__.py
+ google/cloud/bigtable_admin/gapic_version.py
+exclude_lines =
+ # Re-enable the standard pragma
+ pragma: NO COVER
+ # Ignore debug-only repr
+ def __repr__
diff --git a/owl-bot-staging/bigtable_admin/v2/.flake8 b/owl-bot-staging/bigtable_admin/v2/.flake8
new file mode 100644
index 000000000..29227d4cf
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/.flake8
@@ -0,0 +1,33 @@
+# -*- coding: utf-8 -*-
+#
+# Copyright 2020 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Generated by synthtool. DO NOT EDIT!
+[flake8]
+ignore = E203, E266, E501, W503
+exclude =
+ # Exclude generated code.
+ **/proto/**
+ **/gapic/**
+ **/services/**
+ **/types/**
+ *_pb2.py
+
+ # Standard linting exemptions.
+ **/.nox/**
+ __pycache__,
+ .git,
+ *.pyc,
+ conf.py
diff --git a/owl-bot-staging/bigtable_admin/v2/MANIFEST.in b/owl-bot-staging/bigtable_admin/v2/MANIFEST.in
new file mode 100644
index 000000000..d9b71fd61
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/MANIFEST.in
@@ -0,0 +1,2 @@
+recursive-include google/cloud/bigtable_admin *.py
+recursive-include google/cloud/bigtable_admin_v2 *.py
diff --git a/owl-bot-staging/bigtable_admin/v2/README.rst b/owl-bot-staging/bigtable_admin/v2/README.rst
new file mode 100644
index 000000000..93f147497
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/README.rst
@@ -0,0 +1,49 @@
+Python Client for Google Cloud Bigtable Admin API
+=================================================
+
+Quick Start
+-----------
+
+In order to use this library, you first need to go through the following steps:
+
+1. `Select or create a Cloud Platform project.`_
+2. `Enable billing for your project.`_
+3. Enable the Google Cloud Bigtable Admin API.
+4. `Setup Authentication.`_
+
+.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project
+.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project
+.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html
+
+Installation
+~~~~~~~~~~~~
+
+Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to
+create isolated Python environments. The basic problem it addresses is one of
+dependencies and versions, and indirectly permissions.
+
+With `virtualenv`_, it's possible to install this library without needing system
+install permissions, and without clashing with the installed system
+dependencies.
+
+.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/
+
+
+Mac/Linux
+^^^^^^^^^
+
+.. code-block:: console
+
+ python3 -m venv <your-env>
+ source <your-env>/bin/activate
+ <your-env>/bin/pip install /path/to/library
+
+
+Windows
+^^^^^^^
+
+.. code-block:: console
+
+ python3 -m venv <your-env>
+ <your-env>\Scripts\activate
+ <your-env>\Scripts\pip.exe install \path\to\library
diff --git a/owl-bot-staging/bigtable_admin/v2/docs/_static/custom.css b/owl-bot-staging/bigtable_admin/v2/docs/_static/custom.css
new file mode 100644
index 000000000..06423be0b
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/docs/_static/custom.css
@@ -0,0 +1,3 @@
+dl.field-list > dt {
+ min-width: 100px
+}
diff --git a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_instance_admin.rst b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_instance_admin.rst
new file mode 100644
index 000000000..42f7caad7
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_instance_admin.rst
@@ -0,0 +1,10 @@
+BigtableInstanceAdmin
+---------------------------------------
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin
+ :members:
+ :inherited-members:
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers
+ :members:
+ :inherited-members:
diff --git a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_table_admin.rst b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_table_admin.rst
new file mode 100644
index 000000000..e10ff3ac6
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_table_admin.rst
@@ -0,0 +1,10 @@
+BigtableTableAdmin
+------------------------------------
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_table_admin
+ :members:
+ :inherited-members:
+
+.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers
+ :members:
+ :inherited-members:
diff --git a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/services.rst b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/services.rst
new file mode 100644
index 000000000..ea55c7da1
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/services.rst
@@ -0,0 +1,7 @@
+Services for Google Cloud Bigtable Admin v2 API
+===============================================
+.. toctree::
+ :maxdepth: 2
+
+ bigtable_instance_admin
+ bigtable_table_admin
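Taken together, the two service pages above map onto two generated clients. A
minimal sketch of how they are typically used follows; ``my-project`` and
``my-instance`` are placeholder IDs, and credentials are resolved through
Application Default Credentials per the Quick Start steps in the README:

.. code-block:: python

    from google.cloud import bigtable_admin_v2

    # Instance-level administration: instances, clusters, app profiles.
    instance_admin = bigtable_admin_v2.BigtableInstanceAdminClient()
    response = instance_admin.list_instances(parent="projects/my-project")
    for instance in response.instances:
        print(instance.display_name)

    # Table-level administration: tables, snapshots, backups.
    table_admin = bigtable_admin_v2.BigtableTableAdminClient()
    for table in table_admin.list_tables(parent="projects/my-project/instances/my-instance"):
        print(table.name)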
diff --git a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/types.rst b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/types.rst
new file mode 100644
index 000000000..2f935927a
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/types.rst
@@ -0,0 +1,6 @@
+Types for Google Cloud Bigtable Admin v2 API
+============================================
+
+.. automodule:: google.cloud.bigtable_admin_v2.types
+ :members:
+ :show-inheritance:
diff --git a/owl-bot-staging/bigtable_admin/v2/docs/conf.py b/owl-bot-staging/bigtable_admin/v2/docs/conf.py
new file mode 100644
index 000000000..0846814bd
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/docs/conf.py
@@ -0,0 +1,376 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+#
+# google-cloud-bigtable-admin documentation build configuration file
+#
+# This file is execfile()d with the current directory set to its
+# containing dir.
+#
+# Note that not all possible configuration values are present in this
+# autogenerated file.
+#
+# All configuration values have a default; values that are commented out
+# serve to show the default.
+
+import sys
+import os
+import shlex
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+sys.path.insert(0, os.path.abspath(".."))
+
+__version__ = "0.1.0"
+
+# -- General configuration ------------------------------------------------
+
+# If your documentation needs a minimal Sphinx version, state it here.
+needs_sphinx = "4.0.1"
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+ "sphinx.ext.autodoc",
+ "sphinx.ext.autosummary",
+ "sphinx.ext.intersphinx",
+ "sphinx.ext.coverage",
+ "sphinx.ext.napoleon",
+ "sphinx.ext.todo",
+ "sphinx.ext.viewcode",
+]
+
+# autodoc/autosummary flags
+autoclass_content = "both"
+autodoc_default_flags = ["members"]
+autosummary_generate = True
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+# Allow markdown includes (so releases.md can include CHANGELOG.md)
+# http://www.sphinx-doc.org/en/master/markdown.html
+source_parsers = {".md": "recommonmark.parser.CommonMarkParser"}
+
+# The suffix(es) of source filenames.
+# You can specify multiple suffixes as a list of strings:
+source_suffix = [".rst", ".md"]
+
+# The encoding of source files.
+# source_encoding = 'utf-8-sig'
+
+# The root toctree document.
+root_doc = "index"
+
+# General information about the project.
+project = u"google-cloud-bigtable-admin"
+copyright = u"2023, Google, LLC"
+author = u"Google APIs" # TODO: autogenerate this bit
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# The full version, including alpha/beta/rc tags.
+release = __version__
+# The short X.Y version.
+version = ".".join(release.split(".")[0:2])
+
+# The language for content autogenerated by Sphinx. Refer to documentation
+# for a list of supported languages.
+#
+# This is also used if you do content translation via gettext catalogs.
+# Usually you set "language" from the command line for these cases.
+language = 'en'
+
+# There are two options for replacing |today|: either, you set today to some
+# non-false value, then it is used:
+# today = ''
+# Else, today_fmt is used as the format for a strftime call.
+# today_fmt = '%B %d, %Y'
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+exclude_patterns = ["_build"]
+
+# The reST default role (used for this markup: `text`) to use for all
+# documents.
+# default_role = None
+
+# If true, '()' will be appended to :func: etc. cross-reference text.
+# add_function_parentheses = True
+
+# If true, the current module name will be prepended to all description
+# unit titles (such as .. function::).
+# add_module_names = True
+
+# If true, sectionauthor and moduleauthor directives will be shown in the
+# output. They are ignored by default.
+# show_authors = False
+
+# The name of the Pygments (syntax highlighting) style to use.
+pygments_style = "sphinx"
+
+# A list of ignored prefixes for module index sorting.
+# modindex_common_prefix = []
+
+# If true, keep warnings as "system message" paragraphs in the built documents.
+# keep_warnings = False
+
+# If true, `todo` and `todoList` produce output, else they produce nothing.
+todo_include_todos = True
+
+
+# -- Options for HTML output ----------------------------------------------
+
+# The theme to use for HTML and HTML Help pages. See the documentation for
+# a list of builtin themes.
+html_theme = "alabaster"
+
+# Theme options are theme-specific and customize the look and feel of a theme
+# further. For a list of options available for each theme, see the
+# documentation.
+html_theme_options = {
+ "description": "Google Cloud Client Libraries for Python",
+ "github_user": "googleapis",
+ "github_repo": "google-cloud-python",
+ "github_banner": True,
+ "font_family": "'Roboto', Georgia, sans",
+ "head_font_family": "'Roboto', Georgia, serif",
+ "code_font_family": "'Roboto Mono', 'Consolas', monospace",
+}
+
+# Add any paths that contain custom themes here, relative to this directory.
+# html_theme_path = []
+
+# The name for this set of Sphinx documents. If None, it defaults to
+# " v documentation".
+# html_title = None
+
+# A shorter title for the navigation bar. Default is the same as html_title.
+# html_short_title = None
+
+# The name of an image file (relative to this directory) to place at the top
+# of the sidebar.
+# html_logo = None
+
+# The name of an image file (within the static path) to use as favicon of the
+# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
+# pixels large.
+# html_favicon = None
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"]
+
+# Add any extra paths that contain custom files (such as robots.txt or
+# .htaccess) here, relative to this directory. These files are copied
+# directly to the root of the documentation.
+# html_extra_path = []
+
+# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
+# using the given strftime format.
+# html_last_updated_fmt = '%b %d, %Y'
+
+# If true, SmartyPants will be used to convert quotes and dashes to
+# typographically correct entities.
+# html_use_smartypants = True
+
+# Custom sidebar templates, maps document names to template names.
+# html_sidebars = {}
+
+# Additional templates that should be rendered to pages, maps page names to
+# template names.
+# html_additional_pages = {}
+
+# If false, no module index is generated.
+# html_domain_indices = True
+
+# If false, no index is generated.
+# html_use_index = True
+
+# If true, the index is split into individual pages for each letter.
+# html_split_index = False
+
+# If true, links to the reST sources are added to the pages.
+# html_show_sourcelink = True
+
+# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
+# html_show_sphinx = True
+
+# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
+# html_show_copyright = True
+
+# If true, an OpenSearch description file will be output, and all pages will
+# contain a tag referring to it. The value of this option must be the
+# base URL from which the finished HTML is served.
+# html_use_opensearch = ''
+
+# This is the file name suffix for HTML files (e.g. ".xhtml").
+# html_file_suffix = None
+
+# Language to be used for generating the HTML full-text search index.
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# Now only 'ja' uses this config value
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "google-cloud-bigtable-admin-doc"
+
+# -- Options for warnings ------------------------------------------------------
+
+
+suppress_warnings = [
+ # Temporarily suppress this to avoid "more than one target found for
+ # cross-reference" warnings, which are intractable for us to avoid while in
+ # a mono-repo.
+ # See https://github.com/sphinx-doc/sphinx/blob
+ # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843
+ "ref.python"
+]
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ # 'papersize': 'letterpaper',
+ # The font size ('10pt', '11pt' or '12pt').
+ # 'pointsize': '10pt',
+ # Additional stuff for the LaTeX preamble.
+ # 'preamble': '',
+ # Latex figure (float) alignment
+ # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+# author, documentclass [howto, manual, or own class]).
+latex_documents = [
+ (
+ root_doc,
+ "google-cloud-bigtable-admin.tex",
+ u"google-cloud-bigtable-admin Documentation",
+ author,
+ "manual",
+ )
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [
+ (
+ root_doc,
+ "google-cloud-bigtable-admin",
+ u"Google Cloud Bigtable Admin Documentation",
+ [author],
+ 1,
+ )
+]
+
+# If true, show URL addresses after external links.
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+# dir menu entry, description, category)
+texinfo_documents = [
+ (
+ root_doc,
+ "google-cloud-bigtable-admin",
+ u"google-cloud-bigtable-admin Documentation",
+ author,
+ "google-cloud-bigtable-admin",
+ "GAPIC library for Google Cloud Bigtable Admin API",
+ "APIs",
+ )
+]
+
+# Documents to append as an appendix to all manuals.
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+# texinfo_no_detailmenu = False
+
+
+# Example configuration for intersphinx: refer to the Python standard library.
+intersphinx_mapping = {
+ "python": ("http://python.readthedocs.org/en/latest/", None),
+ "gax": ("https://gax-python.readthedocs.org/en/latest/", None),
+ "google-auth": ("https://google-auth.readthedocs.io/en/stable", None),
+ "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None),
+ "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None),
+ "grpc": ("https://grpc.io/grpc/python/", None),
+ "requests": ("http://requests.kennethreitz.org/en/stable/", None),
+ "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None),
+ "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None),
+}
+
+
+# Napoleon settings
+napoleon_google_docstring = True
+napoleon_numpy_docstring = True
+napoleon_include_private_with_doc = False
+napoleon_include_special_with_doc = True
+napoleon_use_admonition_for_examples = False
+napoleon_use_admonition_for_notes = False
+napoleon_use_admonition_for_references = False
+napoleon_use_ivar = False
+napoleon_use_param = True
+napoleon_use_rtype = True
diff --git a/owl-bot-staging/bigtable_admin/v2/docs/index.rst b/owl-bot-staging/bigtable_admin/v2/docs/index.rst
new file mode 100644
index 000000000..ed3f64340
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/docs/index.rst
@@ -0,0 +1,7 @@
+API Reference
+-------------
+.. toctree::
+ :maxdepth: 2
+
+ bigtable_admin_v2/services
+ bigtable_admin_v2/types
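The conf.py and index.rst above are a stock Sphinx setup (``needs_sphinx = "4.0.1"``,
alabaster theme, napoleon docstrings, recommonmark for markdown includes). Assuming
the staging layout shown in this patch, one plausible local docs build might look like:

.. code-block:: console

    cd owl-bot-staging/bigtable_admin/v2
    python3 -m pip install "sphinx>=4.0.1" recommonmark
    python3 -m sphinx -b html docs docs/_build/html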
diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/__init__.py
new file mode 100644
index 000000000..62b0303ba
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/__init__.py
@@ -0,0 +1,189 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from google.cloud.bigtable_admin import gapic_version as package_version
+
+__version__ = package_version.__version__
+
+
+from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.client import BigtableInstanceAdminClient
+from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.async_client import BigtableInstanceAdminAsyncClient
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.client import BigtableTableAdminClient
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.async_client import BigtableTableAdminAsyncClient
+
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateAppProfileRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateClusterMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateClusterRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateInstanceMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateInstanceRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import DeleteAppProfileRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import DeleteClusterRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import DeleteInstanceRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import GetAppProfileRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import GetClusterRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import GetInstanceRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListAppProfilesRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListAppProfilesResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListClustersRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListClustersResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListHotTabletsRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListHotTabletsResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListInstancesRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListInstancesResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import PartialUpdateClusterMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import PartialUpdateClusterRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import PartialUpdateInstanceRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import UpdateAppProfileMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import UpdateAppProfileRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import UpdateClusterMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import UpdateInstanceMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CheckConsistencyRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CheckConsistencyResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateBackupMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateBackupRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableFromSnapshotMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableFromSnapshotRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DeleteBackupRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DeleteSnapshotRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DeleteTableRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DropRowRangeRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GenerateConsistencyTokenRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GenerateConsistencyTokenResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetBackupRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetSnapshotRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetTableRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListBackupsRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListBackupsResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListSnapshotsRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListSnapshotsResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesResponse
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ModifyColumnFamiliesRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import OptimizeRestoredTableMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import RestoreTableMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import RestoreTableRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import SnapshotTableMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import SnapshotTableRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UndeleteTableMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UndeleteTableRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UpdateBackupRequest
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UpdateTableMetadata
+from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UpdateTableRequest
+from google.cloud.bigtable_admin_v2.types.common import OperationProgress
+from google.cloud.bigtable_admin_v2.types.common import StorageType
+from google.cloud.bigtable_admin_v2.types.instance import AppProfile
+from google.cloud.bigtable_admin_v2.types.instance import AutoscalingLimits
+from google.cloud.bigtable_admin_v2.types.instance import AutoscalingTargets
+from google.cloud.bigtable_admin_v2.types.instance import Cluster
+from google.cloud.bigtable_admin_v2.types.instance import HotTablet
+from google.cloud.bigtable_admin_v2.types.instance import Instance
+from google.cloud.bigtable_admin_v2.types.table import Backup
+from google.cloud.bigtable_admin_v2.types.table import BackupInfo
+from google.cloud.bigtable_admin_v2.types.table import ChangeStreamConfig
+from google.cloud.bigtable_admin_v2.types.table import ColumnFamily
+from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo
+from google.cloud.bigtable_admin_v2.types.table import GcRule
+from google.cloud.bigtable_admin_v2.types.table import RestoreInfo
+from google.cloud.bigtable_admin_v2.types.table import Snapshot
+from google.cloud.bigtable_admin_v2.types.table import Table
+from google.cloud.bigtable_admin_v2.types.table import RestoreSourceType
+
+__all__ = ('BigtableInstanceAdminClient',
+ 'BigtableInstanceAdminAsyncClient',
+ 'BigtableTableAdminClient',
+ 'BigtableTableAdminAsyncClient',
+ 'CreateAppProfileRequest',
+ 'CreateClusterMetadata',
+ 'CreateClusterRequest',
+ 'CreateInstanceMetadata',
+ 'CreateInstanceRequest',
+ 'DeleteAppProfileRequest',
+ 'DeleteClusterRequest',
+ 'DeleteInstanceRequest',
+ 'GetAppProfileRequest',
+ 'GetClusterRequest',
+ 'GetInstanceRequest',
+ 'ListAppProfilesRequest',
+ 'ListAppProfilesResponse',
+ 'ListClustersRequest',
+ 'ListClustersResponse',
+ 'ListHotTabletsRequest',
+ 'ListHotTabletsResponse',
+ 'ListInstancesRequest',
+ 'ListInstancesResponse',
+ 'PartialUpdateClusterMetadata',
+ 'PartialUpdateClusterRequest',
+ 'PartialUpdateInstanceRequest',
+ 'UpdateAppProfileMetadata',
+ 'UpdateAppProfileRequest',
+ 'UpdateClusterMetadata',
+ 'UpdateInstanceMetadata',
+ 'CheckConsistencyRequest',
+ 'CheckConsistencyResponse',
+ 'CopyBackupMetadata',
+ 'CopyBackupRequest',
+ 'CreateBackupMetadata',
+ 'CreateBackupRequest',
+ 'CreateTableFromSnapshotMetadata',
+ 'CreateTableFromSnapshotRequest',
+ 'CreateTableRequest',
+ 'DeleteBackupRequest',
+ 'DeleteSnapshotRequest',
+ 'DeleteTableRequest',
+ 'DropRowRangeRequest',
+ 'GenerateConsistencyTokenRequest',
+ 'GenerateConsistencyTokenResponse',
+ 'GetBackupRequest',
+ 'GetSnapshotRequest',
+ 'GetTableRequest',
+ 'ListBackupsRequest',
+ 'ListBackupsResponse',
+ 'ListSnapshotsRequest',
+ 'ListSnapshotsResponse',
+ 'ListTablesRequest',
+ 'ListTablesResponse',
+ 'ModifyColumnFamiliesRequest',
+ 'OptimizeRestoredTableMetadata',
+ 'RestoreTableMetadata',
+ 'RestoreTableRequest',
+ 'SnapshotTableMetadata',
+ 'SnapshotTableRequest',
+ 'UndeleteTableMetadata',
+ 'UndeleteTableRequest',
+ 'UpdateBackupRequest',
+ 'UpdateTableMetadata',
+ 'UpdateTableRequest',
+ 'OperationProgress',
+ 'StorageType',
+ 'AppProfile',
+ 'AutoscalingLimits',
+ 'AutoscalingTargets',
+ 'Cluster',
+ 'HotTablet',
+ 'Instance',
+ 'Backup',
+ 'BackupInfo',
+ 'ChangeStreamConfig',
+ 'ColumnFamily',
+ 'EncryptionInfo',
+ 'GcRule',
+ 'RestoreInfo',
+ 'Snapshot',
+ 'Table',
+ 'RestoreSourceType',
+)
diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/gapic_version.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/gapic_version.py
new file mode 100644
index 000000000..360a0d13e
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/gapic_version.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+__version__ = "0.0.0" # {x-release-please-version}
diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/py.typed b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/py.typed
new file mode 100644
index 000000000..bc26f2069
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/py.typed
@@ -0,0 +1,2 @@
+# Marker file for PEP 561.
+# The google-cloud-bigtable-admin package uses inline types.
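The versionless google.cloud.bigtable_admin package above is a thin alias layer
over the versioned package that follows: every name in its __all__ is imported
from google.cloud.bigtable_admin_v2, so the two spellings below should resolve
to the very same objects. A quick interpreter check, assuming both packages are
installed:

.. code-block:: python

    from google.cloud import bigtable_admin, bigtable_admin_v2

    # The versionless namespace simply re-exports the versioned symbols.
    assert bigtable_admin.BigtableTableAdminClient is bigtable_admin_v2.BigtableTableAdminClient
    assert bigtable_admin.CreateTableRequest is bigtable_admin_v2.CreateTableRequest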
diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/__init__.py
new file mode 100644
index 000000000..97f640900
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/__init__.py
@@ -0,0 +1,190 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+from google.cloud.bigtable_admin_v2 import gapic_version as package_version
+
+__version__ = package_version.__version__
+
+
+from .services.bigtable_instance_admin import BigtableInstanceAdminClient
+from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient
+from .services.bigtable_table_admin import BigtableTableAdminClient
+from .services.bigtable_table_admin import BigtableTableAdminAsyncClient
+
+from .types.bigtable_instance_admin import CreateAppProfileRequest
+from .types.bigtable_instance_admin import CreateClusterMetadata
+from .types.bigtable_instance_admin import CreateClusterRequest
+from .types.bigtable_instance_admin import CreateInstanceMetadata
+from .types.bigtable_instance_admin import CreateInstanceRequest
+from .types.bigtable_instance_admin import DeleteAppProfileRequest
+from .types.bigtable_instance_admin import DeleteClusterRequest
+from .types.bigtable_instance_admin import DeleteInstanceRequest
+from .types.bigtable_instance_admin import GetAppProfileRequest
+from .types.bigtable_instance_admin import GetClusterRequest
+from .types.bigtable_instance_admin import GetInstanceRequest
+from .types.bigtable_instance_admin import ListAppProfilesRequest
+from .types.bigtable_instance_admin import ListAppProfilesResponse
+from .types.bigtable_instance_admin import ListClustersRequest
+from .types.bigtable_instance_admin import ListClustersResponse
+from .types.bigtable_instance_admin import ListHotTabletsRequest
+from .types.bigtable_instance_admin import ListHotTabletsResponse
+from .types.bigtable_instance_admin import ListInstancesRequest
+from .types.bigtable_instance_admin import ListInstancesResponse
+from .types.bigtable_instance_admin import PartialUpdateClusterMetadata
+from .types.bigtable_instance_admin import PartialUpdateClusterRequest
+from .types.bigtable_instance_admin import PartialUpdateInstanceRequest
+from .types.bigtable_instance_admin import UpdateAppProfileMetadata
+from .types.bigtable_instance_admin import UpdateAppProfileRequest
+from .types.bigtable_instance_admin import UpdateClusterMetadata
+from .types.bigtable_instance_admin import UpdateInstanceMetadata
+from .types.bigtable_table_admin import CheckConsistencyRequest
+from .types.bigtable_table_admin import CheckConsistencyResponse
+from .types.bigtable_table_admin import CopyBackupMetadata
+from .types.bigtable_table_admin import CopyBackupRequest
+from .types.bigtable_table_admin import CreateBackupMetadata
+from .types.bigtable_table_admin import CreateBackupRequest
+from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata
+from .types.bigtable_table_admin import CreateTableFromSnapshotRequest
+from .types.bigtable_table_admin import CreateTableRequest
+from .types.bigtable_table_admin import DeleteBackupRequest
+from .types.bigtable_table_admin import DeleteSnapshotRequest
+from .types.bigtable_table_admin import DeleteTableRequest
+from .types.bigtable_table_admin import DropRowRangeRequest
+from .types.bigtable_table_admin import GenerateConsistencyTokenRequest
+from .types.bigtable_table_admin import GenerateConsistencyTokenResponse
+from .types.bigtable_table_admin import GetBackupRequest
+from .types.bigtable_table_admin import GetSnapshotRequest
+from .types.bigtable_table_admin import GetTableRequest
+from .types.bigtable_table_admin import ListBackupsRequest
+from .types.bigtable_table_admin import ListBackupsResponse
+from .types.bigtable_table_admin import ListSnapshotsRequest
+from .types.bigtable_table_admin import ListSnapshotsResponse
+from .types.bigtable_table_admin import ListTablesRequest
+from .types.bigtable_table_admin import ListTablesResponse
+from .types.bigtable_table_admin import ModifyColumnFamiliesRequest
+from .types.bigtable_table_admin import OptimizeRestoredTableMetadata
+from .types.bigtable_table_admin import RestoreTableMetadata
+from .types.bigtable_table_admin import RestoreTableRequest
+from .types.bigtable_table_admin import SnapshotTableMetadata
+from .types.bigtable_table_admin import SnapshotTableRequest
+from .types.bigtable_table_admin import UndeleteTableMetadata
+from .types.bigtable_table_admin import UndeleteTableRequest
+from .types.bigtable_table_admin import UpdateBackupRequest
+from .types.bigtable_table_admin import UpdateTableMetadata
+from .types.bigtable_table_admin import UpdateTableRequest
+from .types.common import OperationProgress
+from .types.common import StorageType
+from .types.instance import AppProfile
+from .types.instance import AutoscalingLimits
+from .types.instance import AutoscalingTargets
+from .types.instance import Cluster
+from .types.instance import HotTablet
+from .types.instance import Instance
+from .types.table import Backup
+from .types.table import BackupInfo
+from .types.table import ChangeStreamConfig
+from .types.table import ColumnFamily
+from .types.table import EncryptionInfo
+from .types.table import GcRule
+from .types.table import RestoreInfo
+from .types.table import Snapshot
+from .types.table import Table
+from .types.table import RestoreSourceType
+
+__all__ = (
+ 'BigtableInstanceAdminAsyncClient',
+ 'BigtableTableAdminAsyncClient',
+'AppProfile',
+'AutoscalingLimits',
+'AutoscalingTargets',
+'Backup',
+'BackupInfo',
+'BigtableInstanceAdminClient',
+'BigtableTableAdminClient',
+'ChangeStreamConfig',
+'CheckConsistencyRequest',
+'CheckConsistencyResponse',
+'Cluster',
+'ColumnFamily',
+'CopyBackupMetadata',
+'CopyBackupRequest',
+'CreateAppProfileRequest',
+'CreateBackupMetadata',
+'CreateBackupRequest',
+'CreateClusterMetadata',
+'CreateClusterRequest',
+'CreateInstanceMetadata',
+'CreateInstanceRequest',
+'CreateTableFromSnapshotMetadata',
+'CreateTableFromSnapshotRequest',
+'CreateTableRequest',
+'DeleteAppProfileRequest',
+'DeleteBackupRequest',
+'DeleteClusterRequest',
+'DeleteInstanceRequest',
+'DeleteSnapshotRequest',
+'DeleteTableRequest',
+'DropRowRangeRequest',
+'EncryptionInfo',
+'GcRule',
+'GenerateConsistencyTokenRequest',
+'GenerateConsistencyTokenResponse',
+'GetAppProfileRequest',
+'GetBackupRequest',
+'GetClusterRequest',
+'GetInstanceRequest',
+'GetSnapshotRequest',
+'GetTableRequest',
+'HotTablet',
+'Instance',
+'ListAppProfilesRequest',
+'ListAppProfilesResponse',
+'ListBackupsRequest',
+'ListBackupsResponse',
+'ListClustersRequest',
+'ListClustersResponse',
+'ListHotTabletsRequest',
+'ListHotTabletsResponse',
+'ListInstancesRequest',
+'ListInstancesResponse',
+'ListSnapshotsRequest',
+'ListSnapshotsResponse',
+'ListTablesRequest',
+'ListTablesResponse',
+'ModifyColumnFamiliesRequest',
+'OperationProgress',
+'OptimizeRestoredTableMetadata',
+'PartialUpdateClusterMetadata',
+'PartialUpdateClusterRequest',
+'PartialUpdateInstanceRequest',
+'RestoreInfo',
+'RestoreSourceType',
+'RestoreTableMetadata',
+'RestoreTableRequest',
+'Snapshot',
+'SnapshotTableMetadata',
+'SnapshotTableRequest',
+'StorageType',
+'Table',
+'UndeleteTableMetadata',
+'UndeleteTableRequest',
+'UpdateAppProfileMetadata',
+'UpdateAppProfileRequest',
+'UpdateBackupRequest',
+'UpdateClusterMetadata',
+'UpdateInstanceMetadata',
+'UpdateTableMetadata', +'UpdateTableRequest', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_metadata.json b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_metadata.json new file mode 100644 index 000000000..9b3426470 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -0,0 +1,737 @@ + { + "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", + "language": "python", + "libraryPackage": "google.cloud.bigtable_admin_v2", + "protoPackage": "google.bigtable.admin.v2", + "schema": "1.0", + "services": { + "BigtableInstanceAdmin": { + "clients": { + "grpc": { + "libraryClient": "BigtableInstanceAdminClient", + "rpcs": { + "CreateAppProfile": { + "methods": [ + "create_app_profile" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateInstance": { + "methods": [ + "create_instance" + ] + }, + "DeleteAppProfile": { + "methods": [ + "delete_app_profile" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteInstance": { + "methods": [ + "delete_instance" + ] + }, + "GetAppProfile": { + "methods": [ + "get_app_profile" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetInstance": { + "methods": [ + "get_instance" + ] + }, + "ListAppProfiles": { + "methods": [ + "list_app_profiles" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListHotTablets": { + "methods": [ + "list_hot_tablets" + ] + }, + "ListInstances": { + "methods": [ + "list_instances" + ] + }, + "PartialUpdateCluster": { + "methods": [ + "partial_update_cluster" + ] + }, + "PartialUpdateInstance": { + "methods": [ + "partial_update_instance" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateAppProfile": { + "methods": [ + "update_app_profile" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateInstance": { + "methods": [ + "update_instance" + ] + } + } + }, + "grpc-async": { + "libraryClient": "BigtableInstanceAdminAsyncClient", + "rpcs": { + "CreateAppProfile": { + "methods": [ + "create_app_profile" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateInstance": { + "methods": [ + "create_instance" + ] + }, + "DeleteAppProfile": { + "methods": [ + "delete_app_profile" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteInstance": { + "methods": [ + "delete_instance" + ] + }, + "GetAppProfile": { + "methods": [ + "get_app_profile" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetInstance": { + "methods": [ + "get_instance" + ] + }, + "ListAppProfiles": { + "methods": [ + "list_app_profiles" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListHotTablets": { + "methods": [ + "list_hot_tablets" + ] + }, + "ListInstances": { + "methods": [ + "list_instances" + ] + }, + "PartialUpdateCluster": { + "methods": [ + "partial_update_cluster" + ] + }, + "PartialUpdateInstance": { + "methods": [ + "partial_update_instance" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateAppProfile": { + "methods": [ + "update_app_profile" 
+ ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateInstance": { + "methods": [ + "update_instance" + ] + } + } + }, + "rest": { + "libraryClient": "BigtableInstanceAdminClient", + "rpcs": { + "CreateAppProfile": { + "methods": [ + "create_app_profile" + ] + }, + "CreateCluster": { + "methods": [ + "create_cluster" + ] + }, + "CreateInstance": { + "methods": [ + "create_instance" + ] + }, + "DeleteAppProfile": { + "methods": [ + "delete_app_profile" + ] + }, + "DeleteCluster": { + "methods": [ + "delete_cluster" + ] + }, + "DeleteInstance": { + "methods": [ + "delete_instance" + ] + }, + "GetAppProfile": { + "methods": [ + "get_app_profile" + ] + }, + "GetCluster": { + "methods": [ + "get_cluster" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetInstance": { + "methods": [ + "get_instance" + ] + }, + "ListAppProfiles": { + "methods": [ + "list_app_profiles" + ] + }, + "ListClusters": { + "methods": [ + "list_clusters" + ] + }, + "ListHotTablets": { + "methods": [ + "list_hot_tablets" + ] + }, + "ListInstances": { + "methods": [ + "list_instances" + ] + }, + "PartialUpdateCluster": { + "methods": [ + "partial_update_cluster" + ] + }, + "PartialUpdateInstance": { + "methods": [ + "partial_update_instance" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UpdateAppProfile": { + "methods": [ + "update_app_profile" + ] + }, + "UpdateCluster": { + "methods": [ + "update_cluster" + ] + }, + "UpdateInstance": { + "methods": [ + "update_instance" + ] + } + } + } + } + }, + "BigtableTableAdmin": { + "clients": { + "grpc": { + "libraryClient": "BigtableTableAdminClient", + "rpcs": { + "CheckConsistency": { + "methods": [ + "check_consistency" + ] + }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, + "CreateBackup": { + "methods": [ + "create_backup" + ] + }, + "CreateTable": { + "methods": [ + "create_table" + ] + }, + "CreateTableFromSnapshot": { + "methods": [ + "create_table_from_snapshot" + ] + }, + "DeleteBackup": { + "methods": [ + "delete_backup" + ] + }, + "DeleteSnapshot": { + "methods": [ + "delete_snapshot" + ] + }, + "DeleteTable": { + "methods": [ + "delete_table" + ] + }, + "DropRowRange": { + "methods": [ + "drop_row_range" + ] + }, + "GenerateConsistencyToken": { + "methods": [ + "generate_consistency_token" + ] + }, + "GetBackup": { + "methods": [ + "get_backup" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetSnapshot": { + "methods": [ + "get_snapshot" + ] + }, + "GetTable": { + "methods": [ + "get_table" + ] + }, + "ListBackups": { + "methods": [ + "list_backups" + ] + }, + "ListSnapshots": { + "methods": [ + "list_snapshots" + ] + }, + "ListTables": { + "methods": [ + "list_tables" + ] + }, + "ModifyColumnFamilies": { + "methods": [ + "modify_column_families" + ] + }, + "RestoreTable": { + "methods": [ + "restore_table" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SnapshotTable": { + "methods": [ + "snapshot_table" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UndeleteTable": { + "methods": [ + "undelete_table" + ] + }, + "UpdateBackup": { + "methods": [ + "update_backup" + ] + }, + "UpdateTable": { + "methods": [ + "update_table" + ] + } + } + }, + "grpc-async": { + "libraryClient": "BigtableTableAdminAsyncClient", + "rpcs": { + "CheckConsistency": { + "methods": [ + "check_consistency" + ] + }, + "CopyBackup": { 
+ "methods": [ + "copy_backup" + ] + }, + "CreateBackup": { + "methods": [ + "create_backup" + ] + }, + "CreateTable": { + "methods": [ + "create_table" + ] + }, + "CreateTableFromSnapshot": { + "methods": [ + "create_table_from_snapshot" + ] + }, + "DeleteBackup": { + "methods": [ + "delete_backup" + ] + }, + "DeleteSnapshot": { + "methods": [ + "delete_snapshot" + ] + }, + "DeleteTable": { + "methods": [ + "delete_table" + ] + }, + "DropRowRange": { + "methods": [ + "drop_row_range" + ] + }, + "GenerateConsistencyToken": { + "methods": [ + "generate_consistency_token" + ] + }, + "GetBackup": { + "methods": [ + "get_backup" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetSnapshot": { + "methods": [ + "get_snapshot" + ] + }, + "GetTable": { + "methods": [ + "get_table" + ] + }, + "ListBackups": { + "methods": [ + "list_backups" + ] + }, + "ListSnapshots": { + "methods": [ + "list_snapshots" + ] + }, + "ListTables": { + "methods": [ + "list_tables" + ] + }, + "ModifyColumnFamilies": { + "methods": [ + "modify_column_families" + ] + }, + "RestoreTable": { + "methods": [ + "restore_table" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SnapshotTable": { + "methods": [ + "snapshot_table" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UndeleteTable": { + "methods": [ + "undelete_table" + ] + }, + "UpdateBackup": { + "methods": [ + "update_backup" + ] + }, + "UpdateTable": { + "methods": [ + "update_table" + ] + } + } + }, + "rest": { + "libraryClient": "BigtableTableAdminClient", + "rpcs": { + "CheckConsistency": { + "methods": [ + "check_consistency" + ] + }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, + "CreateBackup": { + "methods": [ + "create_backup" + ] + }, + "CreateTable": { + "methods": [ + "create_table" + ] + }, + "CreateTableFromSnapshot": { + "methods": [ + "create_table_from_snapshot" + ] + }, + "DeleteBackup": { + "methods": [ + "delete_backup" + ] + }, + "DeleteSnapshot": { + "methods": [ + "delete_snapshot" + ] + }, + "DeleteTable": { + "methods": [ + "delete_table" + ] + }, + "DropRowRange": { + "methods": [ + "drop_row_range" + ] + }, + "GenerateConsistencyToken": { + "methods": [ + "generate_consistency_token" + ] + }, + "GetBackup": { + "methods": [ + "get_backup" + ] + }, + "GetIamPolicy": { + "methods": [ + "get_iam_policy" + ] + }, + "GetSnapshot": { + "methods": [ + "get_snapshot" + ] + }, + "GetTable": { + "methods": [ + "get_table" + ] + }, + "ListBackups": { + "methods": [ + "list_backups" + ] + }, + "ListSnapshots": { + "methods": [ + "list_snapshots" + ] + }, + "ListTables": { + "methods": [ + "list_tables" + ] + }, + "ModifyColumnFamilies": { + "methods": [ + "modify_column_families" + ] + }, + "RestoreTable": { + "methods": [ + "restore_table" + ] + }, + "SetIamPolicy": { + "methods": [ + "set_iam_policy" + ] + }, + "SnapshotTable": { + "methods": [ + "snapshot_table" + ] + }, + "TestIamPermissions": { + "methods": [ + "test_iam_permissions" + ] + }, + "UndeleteTable": { + "methods": [ + "undelete_table" + ] + }, + "UpdateBackup": { + "methods": [ + "update_backup" + ] + }, + "UpdateTable": { + "methods": [ + "update_table" + ] + } + } + } + } + } + } +} diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_version.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_version.py new file mode 100644 index 000000000..360a0d13e --- /dev/null +++ 
b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_version.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/py.typed b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/py.typed new file mode 100644 index 000000000..bc26f2069 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/py.typed @@ -0,0 +1,2 @@ +# Marker file for PEP 561. +# The google-cloud-bigtable-admin package uses inline types. diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/__init__.py new file mode 100644 index 000000000..89a37dc92 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/__init__.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py new file mode 100644 index 000000000..2ff9a47f4 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
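+# (Illustrative aside: this __init__ only re-exports the sync and async
+# clients defined in the sibling modules, so a hypothetical caller can write
+#
+#     from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
+#         BigtableInstanceAdminClient,
+#     )
+#
+# rather than importing from the private client modules directly.)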
+# +from .client import BigtableInstanceAdminClient +from .async_client import BigtableInstanceAdminAsyncClient + +__all__ = ( + 'BigtableInstanceAdminClient', + 'BigtableInstanceAdminAsyncClient', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py new file mode 100644 index 000000000..d9518094e --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -0,0 +1,2168 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport +from .client import BigtableInstanceAdminClient + + +class BigtableInstanceAdminAsyncClient: + """Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. 
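+
+    A minimal instantiation sketch (hypothetical, and assuming Application
+    Default Credentials are available in the environment)::
+
+        client = BigtableInstanceAdminAsyncClient()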
+ """ + + _client: BigtableInstanceAdminClient + + DEFAULT_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + + app_profile_path = staticmethod(BigtableInstanceAdminClient.app_profile_path) + parse_app_profile_path = staticmethod(BigtableInstanceAdminClient.parse_app_profile_path) + cluster_path = staticmethod(BigtableInstanceAdminClient.cluster_path) + parse_cluster_path = staticmethod(BigtableInstanceAdminClient.parse_cluster_path) + crypto_key_path = staticmethod(BigtableInstanceAdminClient.crypto_key_path) + parse_crypto_key_path = staticmethod(BigtableInstanceAdminClient.parse_crypto_key_path) + hot_tablet_path = staticmethod(BigtableInstanceAdminClient.hot_tablet_path) + parse_hot_tablet_path = staticmethod(BigtableInstanceAdminClient.parse_hot_tablet_path) + instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) + parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) + table_path = staticmethod(BigtableInstanceAdminClient.table_path) + parse_table_path = staticmethod(BigtableInstanceAdminClient.parse_table_path) + common_billing_account_path = staticmethod(BigtableInstanceAdminClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(BigtableInstanceAdminClient.parse_common_billing_account_path) + common_folder_path = staticmethod(BigtableInstanceAdminClient.common_folder_path) + parse_common_folder_path = staticmethod(BigtableInstanceAdminClient.parse_common_folder_path) + common_organization_path = staticmethod(BigtableInstanceAdminClient.common_organization_path) + parse_common_organization_path = staticmethod(BigtableInstanceAdminClient.parse_common_organization_path) + common_project_path = staticmethod(BigtableInstanceAdminClient.common_project_path) + parse_common_project_path = staticmethod(BigtableInstanceAdminClient.parse_common_project_path) + common_location_path = staticmethod(BigtableInstanceAdminClient.common_location_path) + parse_common_location_path = staticmethod(BigtableInstanceAdminClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableInstanceAdminAsyncClient: The constructed client. + """ + return BigtableInstanceAdminClient.from_service_account_info.__func__(BigtableInstanceAdminAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableInstanceAdminAsyncClient: The constructed client. + """ + return BigtableInstanceAdminClient.from_service_account_file.__func__(BigtableInstanceAdminAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. 
+ + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` is provided, use the provided one. + (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. + + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + return BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore + + @property + def transport(self) -> BigtableInstanceAdminTransport: + """Returns the transport used by the client instance. + + Returns: + BigtableInstanceAdminTransport: The transport used by the client instance. + """ + return self._client.transport + + get_transport_class = functools.partial(type(BigtableInstanceAdminClient).get_transport_class, type(BigtableInstanceAdminClient)) + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Union[str, BigtableInstanceAdminTransport] = "grpc_asyncio", + client_options: Optional[ClientOptions] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the bigtable instance admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, ~.BigtableInstanceAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (ClientOptions): Custom options for the client. It + won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used.
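+                For example (hypothetical values), the endpoint can be pinned
+                with ``client_options={"api_endpoint":
+                "bigtableadmin.googleapis.com"}``; a plain dict or a
+                ``ClientOptions`` instance both work here.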
+ + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = BigtableInstanceAdminClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + ) + + async def create_instance(self, + request: Optional[Union[bigtable_instance_admin.CreateInstanceRequest, dict]] = None, + *, + parent: Optional[str] = None, + instance_id: Optional[str] = None, + instance: Optional[gba_instance.Instance] = None, + clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Create an instance within a project. + + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.CreateInstance. + parent (:class:`str`): + Required. The unique name of the project in which to + create the new instance. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_id (:class:`str`): + Required. The ID to be used when referring to the new + instance within its project, e.g., just ``myinstance`` + rather than ``projects/myproject/instances/myinstance``. + + This corresponds to the ``instance_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): + Required. The instance to create. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + clusters (:class:`MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]`): + Required. The clusters to be created within the + instance, mapped by desired cluster ID, e.g., just + ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. + Currently, at most four clusters can be specified. + + This corresponds to the ``clusters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object.
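+        # (Illustrative: a hypothetical flattened call looks like
+        # `await client.create_instance(parent="projects/my-project",
+        # instance_id="my-instance", instance=my_instance,
+        # clusters={"my-cluster": my_cluster})`; passing a prebuilt
+        # CreateInstanceRequest instead is the other supported form.)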
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, instance_id, instance, clusters]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.CreateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if instance_id is not None: + request.instance_id = instance_id + if instance is not None: + request.instance = instance + + if clusters: + request.clusters.update(clusters) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_instance, + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.CreateInstanceMetadata, + ) + + # Done; return the response. + return response + + async def get_instance(self, + request: Optional[Union[bigtable_instance_admin.GetInstanceRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Gets information about an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.GetInstance. + name (:class:`str`): + Required. The unique name of the requested instance. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.GetInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
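+        # (Illustrative: `name` is the only flattened field here, e.g. the
+        # hypothetical `await client.get_instance(
+        #     name="projects/my-project/instances/my-instance")`.)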
+ if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_instance, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_instances(self, + request: Optional[Union[bigtable_instance_admin.ListInstancesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListInstancesResponse: + r"""Lists information about instances in a project. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.ListInstances. + parent (:class:`str`): + Required. The unique name of the project for which a + list of instances is requested. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListInstancesResponse: + Response message for + BigtableInstanceAdmin.ListInstances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.ListInstancesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_instances, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. 
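+        # (Illustrative: the awaited result below is a plain
+        # ListInstancesResponse; a hypothetical caller would iterate
+        # `response.instances` and inspect `response.failed_locations` for
+        # locations that could not be reached.)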
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_instance(self, + request: Optional[Union[instance.Instance, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.Instance, dict]]): + The request object. A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + request = instance.Instance(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_instance, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def partial_update_instance(self, + request: Optional[Union[bigtable_instance_admin.PartialUpdateInstanceRequest, dict]] = None, + *, + instance: Optional[gba_instance.Instance] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): + Required. The Instance which will + (partially) replace the current value. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The subset of Instance + fields which should be replaced. Must be + explicitly set. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([instance, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if instance is not None: + request.instance = instance + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.partial_update_instance, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("instance.name", request.instance.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, + ) + + # Done; return the response. + return response + + async def delete_instance(self, + request: Optional[Union[bigtable_instance_admin.DeleteInstanceRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Delete an instance from a project. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.DeleteInstance. + name (:class:`str`): + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.DeleteInstanceRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_instance, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_cluster(self, + request: Optional[Union[bigtable_instance_admin.CreateClusterRequest, dict]] = None, + *, + parent: Optional[str] = None, + cluster_id: Optional[str] = None, + cluster: Optional[instance.Cluster] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a cluster within an instance. + + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.CreateCluster. + parent (:class:`str`): + Required. The unique name of the instance in which to + create the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (:class:`str`): + Required. The ID to be used when referring to the new + cluster within its instance, e.g., just ``mycluster`` + rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, cluster_id, cluster]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.CreateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if cluster_id is not None: + request.cluster_id = cluster_id + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_cluster, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.CreateClusterMetadata, + ) + + # Done; return the response. + return response + + async def get_cluster(self, + request: Optional[Union[bigtable_instance_admin.GetClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Cluster: + r"""Gets information about a cluster. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.GetCluster. + name (:class:`str`): + Required. The unique name of the requested cluster. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Cluster: + A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. 
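+
+            A hypothetical call: ``cluster = await client.get_cluster(
+            name="projects/my-project/instances/my-instance/clusters/my-cluster")``.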
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.GetClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_cluster, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_clusters(self, + request: Optional[Union[bigtable_instance_admin.ListClustersRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListClustersResponse: + r"""Lists information about clusters in an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.ListClusters. + parent (:class:`str`): + Required. The unique name of the instance for which a + list of clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list Clusters for all Instances + in a project, e.g., ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListClustersResponse: + Response message for + BigtableInstanceAdmin.ListClusters. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.ListClustersRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_clusters, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_cluster(self, + request: Optional[Union[instance.Cluster, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a cluster within an instance. + + Note that UpdateCluster does not support updating + cluster_config.cluster_autoscaling_config. In order to update + it, you must use PartialUpdateCluster. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]): + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + request = instance.Cluster(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_cluster, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.UpdateClusterMetadata, + ) + + # Done; return the response. 
+ return response + + async def partial_update_cluster(self, + request: Optional[Union[bigtable_instance_admin.PartialUpdateClusterRequest, dict]] = None, + *, + cluster: Optional[instance.Cluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Partially updates a cluster within a project. This method is the + preferred way to update a Cluster. + + To enable and update autoscaling, set + cluster_config.cluster_autoscaling_config. When autoscaling is + enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning + that updates to it are ignored. Note that an update cannot + simultaneously set serve_nodes to non-zero and + cluster_config.cluster_autoscaling_config to non-empty, and also + specify both in the update_mask. + + To disable autoscaling, clear + cluster_config.cluster_autoscaling_config, and explicitly set a + serve_node count via the update_mask. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateCluster. + cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): + Required. The Cluster which contains the partial updates + to be applied, subject to the update_mask. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The subset of Cluster + fields which should be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.PartialUpdateClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.partial_update_cluster, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
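The autoscaling rules in the `partial_update_cluster` docstring above are easiest to see in code. A hedged sketch of enabling autoscaling; the nested message names follow this package's `instance` types, and the limits and targets are placeholder values.

    from google.cloud.bigtable_admin_v2 import types
    from google.protobuf import field_mask_pb2

    async def enable_autoscaling(client, cluster_name: str):
        cluster = types.Cluster(
            name=cluster_name,
            cluster_config=types.Cluster.ClusterConfig(
                cluster_autoscaling_config=types.Cluster.ClusterAutoscalingConfig(
                    autoscaling_limits=types.AutoscalingLimits(
                        min_serve_nodes=1,
                        max_serve_nodes=5,
                    ),
                    autoscaling_targets=types.AutoscalingTargets(
                        cpu_utilization_percent=60,
                    ),
                ),
            ),
        )
        # serve_nodes is deliberately left unset: it becomes OUTPUT_ONLY once
        # autoscaling is enabled and must not be combined with it in the mask.
        operation = await client.partial_update_cluster(
            cluster=cluster,
            update_mask=field_mask_pb2.FieldMask(
                paths=["cluster_config.cluster_autoscaling_config"],
            ),
        )
        return await operation.result()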
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("cluster.name", request.cluster.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata, + ) + + # Done; return the response. + return response + + async def delete_cluster(self, + request: Optional[Union[bigtable_instance_admin.DeleteClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a cluster from an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + name (:class:`str`): + Required. The unique name of the cluster to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.DeleteClusterRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_cluster, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def create_app_profile(self, + request: Optional[Union[bigtable_instance_admin.CreateAppProfileRequest, dict]] = None, + *, + parent: Optional[str] = None, + app_profile_id: Optional[str] = None, + app_profile: Optional[instance.AppProfile] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Creates an app profile within an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.CreateAppProfile. + parent (:class:`str`): + Required. 
The unique name of the instance in which to + create the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (:class:`str`): + Required. The ID to be used when referring to the new + app profile within its instance, e.g., just + ``myprofile`` rather than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, app_profile_id, app_profile]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.CreateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if app_profile_id is not None: + request.app_profile_id = app_profile_id + if app_profile is not None: + request.app_profile = app_profile + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_app_profile, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_app_profile(self, + request: Optional[Union[bigtable_instance_admin.GetAppProfileRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Gets information about an app profile. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.GetAppProfile. + name (:class:`str`): + Required. The unique name of the requested app profile. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. 
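A short sketch of the flattened `create_app_profile` call described above; the routing policy shown (`single_cluster_routing`) is one of the AppProfile variants in this package's types, and all IDs are placeholders.

    from google.cloud.bigtable_admin_v2 import types

    async def create_profile(client):
        profile = types.AppProfile(
            description="pins traffic to a single cluster",
            single_cluster_routing=types.AppProfile.SingleClusterRouting(
                cluster_id="my-cluster",
                allow_transactional_writes=False,
            ),
        )
        return await client.create_app_profile(
            parent="projects/my-project/instances/my-instance",
            app_profile_id="my-profile",
            app_profile=profile,
        )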
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.GetAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_app_profile, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_app_profiles(self, + request: Optional[Union[bigtable_instance_admin.ListAppProfilesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAppProfilesAsyncPager: + r"""Lists information about app profiles in an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.ListAppProfiles. + parent (:class:`str`): + Required. The unique name of the instance for which a + list of app profiles is requested. Values are of the + form ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list AppProfiles for all + Instances in a project, e.g., + ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager: + Response message for + BigtableInstanceAdmin.ListAppProfiles. 
+ Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.ListAppProfilesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_app_profiles, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListAppProfilesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_app_profile(self, + request: Optional[Union[bigtable_instance_admin.UpdateAppProfileRequest, dict]] = None, + *, + app_profile: Optional[instance.AppProfile] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates an app profile within an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`): + Required. The app profile which will + (partially) replace the current value. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The subset of app profile + fields which should be replaced. If + unset, all fields will be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. 
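Because `list_app_profiles` returns an async pager, callers never handle page tokens directly; iterating the pager fetches further pages on demand. A minimal sketch:

    async def print_app_profiles(client, parent: str):
        pager = await client.list_app_profiles(parent=parent)
        async for app_profile in pager:  # additional pages resolve automatically
            print(app_profile.name)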
+ + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic + from a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([app_profile, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.UpdateAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if app_profile is not None: + request.app_profile = app_profile + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_app_profile, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("app_profile.name", request.app_profile.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + instance.AppProfile, + metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, + ) + + # Done; return the response. + return response + + async def delete_app_profile(self, + request: Optional[Union[bigtable_instance_admin.DeleteAppProfileRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an app profile from an instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.DeleteAppProfile. + name (:class:`str`): + Required. The unique name of the app profile to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
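Since `update_app_profile` returns a long-running operation, the updated resource is obtained by awaiting the operation's result. A sketch, assuming an `AppProfile` fetched earlier:

    from google.protobuf import field_mask_pb2

    async def update_description(client, app_profile):
        app_profile.description = "updated description"
        operation = await client.update_app_profile(
            app_profile=app_profile,
            update_mask=field_mask_pb2.FieldMask(paths=["description"]),
        )
        # Resolves to the updated AppProfile once the server finishes.
        return await operation.result()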
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.DeleteAppProfileRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_app_profile, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def get_iam_policy(self, + request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, + *, + resource: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Args: + request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): + The request object. Request message for ``GetIamPolicy`` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ + **JSON example:** + + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.GetIamPolicyRequest(**request) + elif not request: + request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_iam_policy, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("resource", request.resource), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def set_iam_policy(self, + request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, + *, + resource: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the access control policy on an instance + resource. Replaces any existing policy. + + Args: + request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): + The request object. Request message for ``SetIamPolicy`` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. 
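A compact sketch of reading the bindings described in the policy documentation above; the resource name is a placeholder instance.

    async def show_bindings(client):
        policy = await client.get_iam_policy(
            resource="projects/my-project/instances/my-instance",
        )
        for binding in policy.bindings:
            print(binding.role, list(binding.members))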
+ + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.SetIamPolicyRequest(**request) + elif not request: + request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.set_iam_policy, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("resource", request.resource), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def test_iam_permissions(self, + request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, + *, + resource: Optional[str] = None, + permissions: Optional[MutableSequence[str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the + specified instance resource. + + Args: + request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): + The request object. Request message for ``TestIamPermissions`` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (:class:`MutableSequence[str]`): + The set of permissions to check for the ``resource``. + Permissions with wildcards (such as '*' or 'storage.*') + are not allowed. For more information see `IAM + Overview `__. + + This corresponds to the ``permissions`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: + Response message for TestIamPermissions method. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource, permissions]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + if isinstance(request, dict): + request = iam_policy_pb2.TestIamPermissionsRequest(**request) + elif not request: + request = iam_policy_pb2.TestIamPermissionsRequest(resource=resource, permissions=permissions, ) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.test_iam_permissions, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("resource", request.resource), + )), + ) + + # Send the request. 
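A sketch of the `test_iam_permissions` call being wrapped here; the permission strings are illustrative Bigtable admin permissions, and wildcards are disallowed as the docstring notes.

    async def held_permissions(client, resource: str) -> set:
        response = await client.test_iam_permissions(
            resource=resource,
            permissions=["bigtable.clusters.get", "bigtable.clusters.update"],
        )
        # The response echoes back only the permissions the caller holds.
        return set(response.permissions)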
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_hot_tablets(self, + request: Optional[Union[bigtable_instance_admin.ListHotTabletsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListHotTabletsAsyncPager: + r"""Lists hot tablets in a cluster, within the time range + provided. Hot tablets are ordered based on CPU usage. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]]): + The request object. Request message for + BigtableInstanceAdmin.ListHotTablets. + parent (:class:`str`): + Required. The cluster name to list hot tablets. Value is + in the following form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager: + Response message for + BigtableInstanceAdmin.ListHotTablets. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_instance_admin.ListHotTabletsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_hot_tablets, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListHotTabletsAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. 
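A sketch of consuming the hot-tablets pager defined above; the cluster path is a placeholder, and `node_cpu_usage_percent` is the CPU measure the ordering is based on.

    async def show_hot_tablets(client):
        pager = await client.list_hot_tablets(
            parent="projects/my-project/instances/my-instance/clusters/my-cluster",
        )
        async for hot_tablet in pager:
            print(hot_tablet.table_name, hot_tablet.node_cpu_usage_percent)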
+ return response + + async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient": + return self + + async def __aexit__(self, exc_type, exc, tb): + await self.transport.close() + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "BigtableInstanceAdminAsyncClient", +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py new file mode 100644 index 000000000..a84284a9b --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -0,0 +1,2326 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import BigtableInstanceAdminGrpcTransport +from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport +from .transports.rest import BigtableInstanceAdminRestTransport + + +class BigtableInstanceAdminClientMeta(type): + """Metaclass for the BigtableInstanceAdmin client. + + This provides class-level methods for building and retrieving + support objects (e.g. 
transport) without polluting the client instance
+    objects.
+    """
+    _transport_registry = OrderedDict()  # type: Dict[str, Type[BigtableInstanceAdminTransport]]
+    _transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport
+    _transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport
+    _transport_registry["rest"] = BigtableInstanceAdminRestTransport
+
+    def get_transport_class(cls,
+            label: Optional[str] = None,
+        ) -> Type[BigtableInstanceAdminTransport]:
+        """Returns an appropriate transport class.
+
+        Args:
+            label: The name of the desired transport. If none is
+                provided, then the first transport in the registry is used.
+
+        Returns:
+            The transport class to use.
+        """
+        # If a specific transport is requested, return that one.
+        if label:
+            return cls._transport_registry[label]
+
+        # No transport is requested; return the default (that is, the first one
+        # in the dictionary).
+        return next(iter(cls._transport_registry.values()))
+
+
+class BigtableInstanceAdminClient(metaclass=BigtableInstanceAdminClientMeta):
+    """Service for creating, configuring, and deleting Cloud
+    Bigtable Instances and Clusters. Provides access to the Instance
+    and Cluster schemas only, not the tables' metadata or data
+    stored in those tables.
+    """
+
+    @staticmethod
+    def _get_default_mtls_endpoint(api_endpoint):
+        """Converts an API endpoint to its mTLS endpoint.
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            BigtableInstanceAdminClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            BigtableInstanceAdminClient: The constructed client.
+ """ + credentials = service_account.Credentials.from_service_account_file( + filename) + kwargs["credentials"] = credentials + return cls(*args, **kwargs) + + from_service_account_json = from_service_account_file + + @property + def transport(self) -> BigtableInstanceAdminTransport: + """Returns the transport used by the client instance. + + Returns: + BigtableInstanceAdminTransport: The transport used by the client + instance. + """ + return self._transport + + @staticmethod + def app_profile_path(project: str,instance: str,app_profile: str,) -> str: + """Returns a fully-qualified app_profile string.""" + return "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format(project=project, instance=instance, app_profile=app_profile, ) + + @staticmethod + def parse_app_profile_path(path: str) -> Dict[str,str]: + """Parses a app_profile path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/appProfiles/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def cluster_path(project: str,instance: str,cluster: str,) -> str: + """Returns a fully-qualified cluster string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}".format(project=project, instance=instance, cluster=cluster, ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str,str]: + """Parses a cluster path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def crypto_key_path(project: str,location: str,key_ring: str,crypto_key: str,) -> str: + """Returns a fully-qualified crypto_key string.""" + return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, ) + + @staticmethod + def parse_crypto_key_path(path: str) -> Dict[str,str]: + """Parses a crypto_key path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def hot_tablet_path(project: str,instance: str,cluster: str,hot_tablet: str,) -> str: + """Returns a fully-qualified hot_tablet string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format(project=project, instance=instance, cluster=cluster, hot_tablet=hot_tablet, ) + + @staticmethod + def parse_hot_tablet_path(path: str) -> Dict[str,str]: + """Parses a hot_tablet path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/hotTablets/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def instance_path(project: str,instance: str,) -> str: + """Returns a fully-qualified instance string.""" + return "projects/{project}/instances/{instance}".format(project=project, instance=instance, ) + + @staticmethod + def parse_instance_path(path: str) -> Dict[str,str]: + """Parses a instance path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def table_path(project: str,instance: str,table: str,) -> str: + """Returns a fully-qualified table string.""" + return "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) + + @staticmethod + def parse_table_path(path: str) -> Dict[str,str]: + """Parses a table 
path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. 
+ + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, BigtableInstanceAdminTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the bigtable instance admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BigtableInstanceAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
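A sketch of how the endpoint resolution implemented above reacts to `GOOGLE_API_USE_MTLS_ENDPOINT`; with the certificate variable left at its default of "false", no cert source is returned.

    import os

    from google.api_core import client_options as client_options_lib

    os.environ["GOOGLE_API_USE_MTLS_ENDPOINT"] = "always"
    endpoint, cert_source = BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(
        client_options_lib.ClientOptions()
    )
    assert endpoint == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT
    assert cert_source is None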
+ Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BigtableInstanceAdminTransport): + # transport is a BigtableInstanceAdminTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def create_instance(self, + request: Optional[Union[bigtable_instance_admin.CreateInstanceRequest, dict]] = None, + *, + parent: Optional[str] = None, + instance_id: Optional[str] = None, + instance: Optional[gba_instance.Instance] = None, + clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Create an instance within a project. + + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.CreateInstance. + parent (str): + Required. The unique name of the project in which to + create the new instance. Values are of the form + ``projects/{project}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance_id (str): + Required. The ID to be used when referring to the new + instance within its project, e.g., just ``myinstance`` + rather than ``projects/myproject/instances/myinstance``. 
+ + This corresponds to the ``instance_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The instance to create. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + clusters (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): + Required. The clusters to be created within the + instance, mapped by desired cluster ID, e.g., just + ``mycluster`` rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. + Currently, at most four clusters can be specified. + + This corresponds to the ``clusters`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, instance_id, instance, clusters]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.CreateInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): + request = bigtable_instance_admin.CreateInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if instance_id is not None: + request.instance_id = instance_id + if instance is not None: + request.instance = instance + if clusters is not None: + request.clusters = clusters + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.CreateInstanceMetadata, + ) + + # Done; return the response. 
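+ # The returned future resolves to an Instance once provisioning
+ # completes; a typical (illustrative) pattern is to block with
+ # response.result(), which raises on failure, while in-flight
+ # progress is exposed as CreateInstanceMetadata via response.metadata.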
+ return response + + def get_instance(self, + request: Optional[Union[bigtable_instance_admin.GetInstanceRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Gets information about an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.GetInstance. + name (str): + Required. The unique name of the requested instance. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.GetInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): + request = bigtable_instance_admin.GetInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_instances(self, + request: Optional[Union[bigtable_instance_admin.ListInstancesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListInstancesResponse: + r"""Lists information about instances in a project. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.ListInstances. + parent (str): + Required. The unique name of the project for which a + list of instances is requested. Values are of the form + ``projects/{project}``. 
+ + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListInstancesResponse: + Response message for + BigtableInstanceAdmin.ListInstances. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListInstancesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): + request = bigtable_instance_admin.ListInstancesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_instances] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_instance(self, + request: Optional[Union[instance.Instance, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Instance: + r"""Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]): + The request object. A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Instance: + A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a instance.Instance. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
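+ # Unlike most methods on this client, the legacy UpdateInstance RPC
+ # takes a full Instance message rather than a dedicated *Request type,
+ # so either an instance.Instance or a dict with the same fields
+ # (e.g., hypothetically, {"name": ..., "display_name": ...}) is accepted.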
+ if not isinstance(request, instance.Instance): + request = instance.Instance(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def partial_update_instance(self, + request: Optional[Union[bigtable_instance_admin.PartialUpdateInstanceRequest, dict]] = None, + *, + instance: Optional[gba_instance.Instance] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The Instance which will + (partially) replace the current value. + + This corresponds to the ``instance`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of Instance + fields which should be replaced. Must be + explicitly set. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and + the resources that serve them. All tables in an + instance are served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([instance, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.PartialUpdateInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.PartialUpdateInstanceRequest): + request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
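+ # The FieldMask restricts which Instance fields the server replaces;
+ # for example (illustrative), field_mask_pb2.FieldMask(paths=["display_name"])
+ # limits the partial update to the display name alone.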
+ if instance is not None: + request.instance = instance + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.partial_update_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("instance.name", request.instance.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gba_instance.Instance, + metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, + ) + + # Done; return the response. + return response + + def delete_instance(self, + request: Optional[Union[bigtable_instance_admin.DeleteInstanceRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Delete an instance from a project. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.DeleteInstance. + name (str): + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.DeleteInstanceRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): + request = bigtable_instance_admin.DeleteInstanceRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_instance] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
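+ # DeleteInstance returns google.protobuf.Empty, so the call is made
+ # purely for its side effect; failures still surface as exceptions.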
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_cluster(self, + request: Optional[Union[bigtable_instance_admin.CreateClusterRequest, dict]] = None, + *, + parent: Optional[str] = None, + cluster_id: Optional[str] = None, + cluster: Optional[instance.Cluster] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a cluster within an instance. + + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.CreateCluster. + parent (str): + Required. The unique name of the instance in which to + create the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster_id (str): + Required. The ID to be used when referring to the new + cluster within its instance, e.g., just ``mycluster`` + rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + + This corresponds to the ``cluster_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, cluster_id, cluster]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.CreateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): + request = bigtable_instance_admin.CreateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
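+ # Only keywords that were actually supplied are copied below; passing
+ # None leaves the corresponding request field at its protobuf default.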
+ if parent is not None: + request.parent = parent + if cluster_id is not None: + request.cluster_id = cluster_id + if cluster is not None: + request.cluster = cluster + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.CreateClusterMetadata, + ) + + # Done; return the response. + return response + + def get_cluster(self, + request: Optional[Union[bigtable_instance_admin.GetClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.Cluster: + r"""Gets information about a cluster. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.GetCluster. + name (str): + Required. The unique name of the requested cluster. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Cluster: + A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.GetClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.GetClusterRequest): + request = bigtable_instance_admin.GetClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
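+ # retry and timeout default to gapic_v1.method.DEFAULT, a sentinel
+ # telling the wrapped callable to fall back to the client-wide settings
+ # baked in at transport creation; explicit per-call values override them.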
+ response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_clusters(self, + request: Optional[Union[bigtable_instance_admin.ListClustersRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_instance_admin.ListClustersResponse: + r"""Lists information about clusters in an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.ListClusters. + parent (str): + Required. The unique name of the instance for which a + list of clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list Clusters for all Instances + in a project, e.g., ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.ListClustersResponse: + Response message for + BigtableInstanceAdmin.ListClusters. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListClustersRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListClustersRequest): + request = bigtable_instance_admin.ListClustersRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_clusters] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_cluster(self, + request: Optional[Union[instance.Cluster, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates a cluster within an instance. + + Note that UpdateCluster does not support updating + cluster_config.cluster_autoscaling_config. In order to update + it, you must use PartialUpdateCluster. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): + The request object. 
A resizable group of nodes in a particular cloud + location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a instance.Cluster. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, instance.Cluster): + request = instance.Cluster(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.UpdateClusterMetadata, + ) + + # Done; return the response. + return response + + def partial_update_cluster(self, + request: Optional[Union[bigtable_instance_admin.PartialUpdateClusterRequest, dict]] = None, + *, + cluster: Optional[instance.Cluster] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Partially updates a cluster within a project. This method is the + preferred way to update a Cluster. + + To enable and update autoscaling, set + cluster_config.cluster_autoscaling_config. When autoscaling is + enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning + that updates to it are ignored. Note that an update cannot + simultaneously set serve_nodes to non-zero and + cluster_config.cluster_autoscaling_config to non-empty, and also + specify both in the update_mask. + + To disable autoscaling, clear + cluster_config.cluster_autoscaling_config, and explicitly set a + serve_node count via the update_mask. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateCluster. + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The Cluster which contains the partial updates + to be applied, subject to the update_mask. + + This corresponds to the ``cluster`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. 
The subset of Cluster + fields which should be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable + of serving all + [Tables][google.bigtable.admin.v2.Table] in the + parent [Instance][google.bigtable.admin.v2.Instance]. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([cluster, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.PartialUpdateClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest): + request = bigtable_instance_admin.PartialUpdateClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if cluster is not None: + request.cluster = cluster + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.partial_update_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("cluster.name", request.cluster.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.Cluster, + metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata, + ) + + # Done; return the response. + return response + + def delete_cluster(self, + request: Optional[Union[bigtable_instance_admin.DeleteClusterRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a cluster from an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + name (str): + Required. The unique name of the cluster to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. 
+ timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.DeleteClusterRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): + request = bigtable_instance_admin.DeleteClusterRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_cluster] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_app_profile(self, + request: Optional[Union[bigtable_instance_admin.CreateAppProfileRequest, dict]] = None, + *, + parent: Optional[str] = None, + app_profile_id: Optional[str] = None, + app_profile: Optional[instance.AppProfile] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Creates an app profile within an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.CreateAppProfile. + parent (str): + Required. The unique name of the instance in which to + create the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile_id (str): + Required. The ID to be used when referring to the new + app profile within its instance, e.g., just + ``myprofile`` rather than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + + This corresponds to the ``app_profile_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, app_profile_id, app_profile]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.CreateAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): + request = bigtable_instance_admin.CreateAppProfileRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if app_profile_id is not None: + request.app_profile_id = app_profile_id + if app_profile is not None: + request.app_profile = app_profile + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_app_profile(self, + request: Optional[Union[bigtable_instance_admin.GetAppProfileRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> instance.AppProfile: + r"""Gets information about an app profile. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.GetAppProfile. + name (str): + Required. The unique name of the requested app profile. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
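+ # For example (hypothetical resource name), passing only
+ # name="projects/p/instances/i/appProfiles/ap" builds the request from
+ # the keyword, while passing both `request` and `name` raises below.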
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.GetAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): + request = bigtable_instance_admin.GetAppProfileRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_app_profiles(self, + request: Optional[Union[bigtable_instance_admin.ListAppProfilesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListAppProfilesPager: + r"""Lists information about app profiles in an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.ListAppProfiles. + parent (str): + Required. The unique name of the instance for which a + list of app profiles is requested. Values are of the + form ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list AppProfiles for all + Instances in a project, e.g., + ``projects/myproject/instances/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager: + Response message for + BigtableInstanceAdmin.ListAppProfiles. + Iterating over this object will yield + results and resolve additional pages + automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListAppProfilesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
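+ # (The response side is handled further below, where the raw
+ # page-shaped response is wrapped in a ListAppProfilesPager so that
+ # iteration fetches subsequent pages transparently.)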
+ if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): + request = bigtable_instance_admin.ListAppProfilesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_app_profiles] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListAppProfilesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_app_profile(self, + request: Optional[Union[bigtable_instance_admin.UpdateAppProfileRequest, dict]] = None, + *, + app_profile: Optional[instance.AppProfile] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates an app profile within an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile which will + (partially) replace the current value. + + This corresponds to the ``app_profile`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of app profile + fields which should be replaced. If + unset, all fields will be replaced. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic + from a particular end user application. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([app_profile, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.UpdateAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. 
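+ # Per the docstring above, an unset update_mask replaces all fields;
+ # an illustrative mask such as FieldMask(paths=["description"]) would
+ # instead replace only AppProfile.description.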
+ if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): + request = bigtable_instance_admin.UpdateAppProfileRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if app_profile is not None: + request.app_profile = app_profile + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("app_profile.name", request.app_profile.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + instance.AppProfile, + metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, + ) + + # Done; return the response. + return response + + def delete_app_profile(self, + request: Optional[Union[bigtable_instance_admin.DeleteAppProfileRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes an app profile from an instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]): + The request object. Request message for + BigtableInstanceAdmin.DeleteAppProfile. + name (str): + Required. The unique name of the app profile to be + deleted. Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.DeleteAppProfileRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): + request = bigtable_instance_admin.DeleteAppProfileRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_app_profile] + + # Certain fields should be provided within the metadata header; + # add these here. 
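+ # The routing values assembled below are serialized into the
+ # x-goog-request-params header (URL-encoded), which the service uses
+ # to route the call to the correct resource.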
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def get_iam_policy(self, + request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, + *, + resource: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Args: + request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): + The request object. Request message for ``GetIamPolicy`` method. + resource (str): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](\ https://cloud.google.com/iam/docs/). 
+ + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + request = iam_policy_pb2.GetIamPolicyRequest(**request) + elif not request: + # Null request, just make one. + request = iam_policy_pb2.GetIamPolicyRequest() + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("resource", request.resource), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def set_iam_policy(self, + request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, + *, + resource: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the access control policy on an instance + resource. Replaces any existing policy. + + Args: + request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): + The request object. Request message for ``SetIamPolicy`` method. + resource (str): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
+ + **JSON example:** + + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + request = iam_policy_pb2.SetIamPolicyRequest(**request) + elif not request: + # Null request, just make one. + request = iam_policy_pb2.SetIamPolicyRequest() + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("resource", request.resource), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def test_iam_permissions(self, + request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, + *, + resource: Optional[str] = None, + permissions: Optional[MutableSequence[str]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Returns permissions that the caller has on the + specified instance resource. + + Args: + request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): + The request object. Request message for ``TestIamPermissions`` method. + resource (str): + REQUIRED: The resource for which the + policy detail is being requested. See + the operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + permissions (MutableSequence[str]): + The set of permissions to check for the ``resource``. 
+                Permissions with wildcards (such as '*' or 'storage.*')
+                are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+
+                This corresponds to the ``permissions`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
+                Response message for TestIamPermissions method.
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource, permissions])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        if isinstance(request, dict):
+            # The request isn't a proto-plus wrapped type,
+            # so it must be constructed via keyword expansion.
+            request = iam_policy_pb2.TestIamPermissionsRequest(**request)
+        elif not request:
+            # Null request, just make one.
+            request = iam_policy_pb2.TestIamPermissionsRequest()
+        if resource is not None:
+            request.resource = resource
+        if permissions:
+            request.permissions.extend(permissions)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("resource", request.resource),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def list_hot_tablets(self,
+            request: Optional[Union[bigtable_instance_admin.ListHotTabletsRequest, dict]] = None,
+            *,
+            parent: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> pagers.ListHotTabletsPager:
+        r"""Lists hot tablets in a cluster, within the time range
+        provided. Hot tablets are ordered based on CPU usage.
+
+        Args:
+            request (Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]):
+                The request object. Request message for
+                BigtableInstanceAdmin.ListHotTablets.
+            parent (str):
+                Required. The cluster name to list hot tablets. Value is
+                in the following form:
+                ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager:
+                Response message for
+                BigtableInstanceAdmin.ListHotTablets.
+                Iterating over this object will yield
+                results and resolve additional pages
+                automatically.
+
+        """
+        # Create or coerce a protobuf request object.
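+        # (Editorial note: ``request`` may arrive as a proto message, as a dict
+        # such as {"parent": "projects/p/instances/i/clusters/c"}, or as None;
+        # the checks below normalize all three cases. The path shown is
+        # illustrative only.)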
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_instance_admin.ListHotTabletsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest): + request = bigtable_instance_admin.ListHotTabletsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_hot_tablets] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListHotTabletsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "BigtableInstanceAdminClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + + + + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "BigtableInstanceAdminClient", +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py new file mode 100644 index 000000000..d480cc430 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -0,0 +1,261 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance + + +class ListAppProfilesPager: + """A pager for iterating through ``list_app_profiles`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``app_profiles`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListAppProfiles`` requests and continue to iterate + through the ``app_profiles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., bigtable_instance_admin.ListAppProfilesResponse], + request: bigtable_instance_admin.ListAppProfilesRequest, + response: bigtable_instance_admin.ListAppProfilesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_instance_admin.ListAppProfilesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_instance_admin.ListAppProfilesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[instance.AppProfile]: + for page in self.pages: + yield from page.app_profiles + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListAppProfilesAsyncPager: + """A pager for iterating through ``list_app_profiles`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``app_profiles`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListAppProfiles`` requests and continue to iterate + through the ``app_profiles`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[bigtable_instance_admin.ListAppProfilesResponse]], + request: bigtable_instance_admin.ListAppProfilesRequest, + response: bigtable_instance_admin.ListAppProfilesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): + The initial response object. 
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_instance_admin.ListAppProfilesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[bigtable_instance_admin.ListAppProfilesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[instance.AppProfile]: + async def async_generator(): + async for page in self.pages: + for response in page.app_profiles: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHotTabletsPager: + """A pager for iterating through ``list_hot_tablets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``hot_tablets`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListHotTablets`` requests and continue to iterate + through the ``hot_tablets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., bigtable_instance_admin.ListHotTabletsResponse], + request: bigtable_instance_admin.ListHotTabletsRequest, + response: bigtable_instance_admin.ListHotTabletsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_instance_admin.ListHotTabletsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_instance_admin.ListHotTabletsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[instance.HotTablet]: + for page in self.pages: + yield from page.hot_tablets + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListHotTabletsAsyncPager: + """A pager for iterating through ``list_hot_tablets`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``hot_tablets`` field. 
+ + If there are more pages, the ``__aiter__`` method will make additional + ``ListHotTablets`` requests and continue to iterate + through the ``hot_tablets`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[bigtable_instance_admin.ListHotTabletsResponse]], + request: bigtable_instance_admin.ListHotTabletsRequest, + response: bigtable_instance_admin.ListHotTabletsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_instance_admin.ListHotTabletsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[bigtable_instance_admin.ListHotTabletsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[instance.HotTablet]: + async def async_generator(): + async for page in self.pages: + for response in page.hot_tablets: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py new file mode 100644 index 000000000..bfba48436 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import BigtableInstanceAdminTransport +from .grpc import BigtableInstanceAdminGrpcTransport +from .grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport +from .rest import BigtableInstanceAdminRestTransport +from .rest import BigtableInstanceAdminRestInterceptor + + +# Compile a registry of transports. 
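+# (Editorial note: this registry maps the transport name a caller may pass to
+# the client, e.g. "grpc", "grpc_asyncio", or "rest", to the class that
+# implements it. A hypothetical lookup, for illustration only:
+#
+#     transport_cls = _transport_registry["rest"]  # BigtableInstanceAdminRestTransport
+# )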
+_transport_registry = OrderedDict() # type: Dict[str, Type[BigtableInstanceAdminTransport]] +_transport_registry['grpc'] = BigtableInstanceAdminGrpcTransport +_transport_registry['grpc_asyncio'] = BigtableInstanceAdminGrpcAsyncIOTransport +_transport_registry['rest'] = BigtableInstanceAdminRestTransport + +__all__ = ( + 'BigtableInstanceAdminTransport', + 'BigtableInstanceAdminGrpcTransport', + 'BigtableInstanceAdminGrpcAsyncIOTransport', + 'BigtableInstanceAdminRestTransport', + 'BigtableInstanceAdminRestInterceptor', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py new file mode 100644 index 000000000..b5b508a41 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -0,0 +1,536 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +class BigtableInstanceAdminTransport(abc.ABC): + """Abstract transport class for BigtableInstanceAdmin.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/bigtable.admin.instance', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + DEFAULT_HOST: str = 'bigtableadmin.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, 
+ api_audience: Optional[str] = None, + **kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience(api_audience if api_audience else host) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
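+        # (Editorial note: ``gapic_v1.method.wrap_method`` binds each RPC to the
+        # default retry policy and timeout below, so retryable methods back off
+        # on DeadlineExceeded/ServiceUnavailable automatically unless the caller
+        # overrides ``retry`` or ``timeout`` on a per-call basis.)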
+ self._wrapped_methods = { + self.create_instance: gapic_v1.method.wrap_method( + self.create_instance, + default_timeout=300.0, + client_info=client_info, + ), + self.get_instance: gapic_v1.method.wrap_method( + self.get_instance, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_instances: gapic_v1.method.wrap_method( + self.list_instances, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_instance: gapic_v1.method.wrap_method( + self.update_instance, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.partial_update_instance: gapic_v1.method.wrap_method( + self.partial_update_instance, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_instance: gapic_v1.method.wrap_method( + self.delete_instance, + default_timeout=60.0, + client_info=client_info, + ), + self.create_cluster: gapic_v1.method.wrap_method( + self.create_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.get_cluster: gapic_v1.method.wrap_method( + self.get_cluster, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_clusters: gapic_v1.method.wrap_method( + self.list_clusters, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_cluster: gapic_v1.method.wrap_method( + self.update_cluster, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.partial_update_cluster: gapic_v1.method.wrap_method( + self.partial_update_cluster, + default_timeout=None, + client_info=client_info, + ), + self.delete_cluster: gapic_v1.method.wrap_method( + self.delete_cluster, + default_timeout=60.0, + client_info=client_info, + ), + self.create_app_profile: gapic_v1.method.wrap_method( + self.create_app_profile, + default_timeout=60.0, + client_info=client_info, + ), + self.get_app_profile: gapic_v1.method.wrap_method( + self.get_app_profile, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + 
client_info=client_info, + ), + self.list_app_profiles: gapic_v1.method.wrap_method( + self.list_app_profiles, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_app_profile: gapic_v1.method.wrap_method( + self.update_app_profile, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_app_profile: gapic_v1.method.wrap_method( + self.delete_app_profile, + default_timeout=60.0, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=60.0, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_hot_tablets: gapic_v1.method.wrap_method( + self.list_hot_tablets, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients! 
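+
+        Example (editorial sketch; the client's own context manager, shown
+        earlier in this patch, calls this automatically):
+
+        .. code-block:: python
+
+            client = bigtable_admin_v2.BigtableInstanceAdminClient()
+            try:
+                ...  # make calls
+            finally:
+                client.transport.close()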
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_instance(self) -> Callable[ + [bigtable_instance_admin.CreateInstanceRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_instance(self) -> Callable[ + [bigtable_instance_admin.GetInstanceRequest], + Union[ + instance.Instance, + Awaitable[instance.Instance] + ]]: + raise NotImplementedError() + + @property + def list_instances(self) -> Callable[ + [bigtable_instance_admin.ListInstancesRequest], + Union[ + bigtable_instance_admin.ListInstancesResponse, + Awaitable[bigtable_instance_admin.ListInstancesResponse] + ]]: + raise NotImplementedError() + + @property + def update_instance(self) -> Callable[ + [instance.Instance], + Union[ + instance.Instance, + Awaitable[instance.Instance] + ]]: + raise NotImplementedError() + + @property + def partial_update_instance(self) -> Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_instance(self) -> Callable[ + [bigtable_instance_admin.DeleteInstanceRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_cluster(self) -> Callable[ + [bigtable_instance_admin.CreateClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_cluster(self) -> Callable[ + [bigtable_instance_admin.GetClusterRequest], + Union[ + instance.Cluster, + Awaitable[instance.Cluster] + ]]: + raise NotImplementedError() + + @property + def list_clusters(self) -> Callable[ + [bigtable_instance_admin.ListClustersRequest], + Union[ + bigtable_instance_admin.ListClustersResponse, + Awaitable[bigtable_instance_admin.ListClustersResponse] + ]]: + raise NotImplementedError() + + @property + def update_cluster(self) -> Callable[ + [instance.Cluster], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def partial_update_cluster(self) -> Callable[ + [bigtable_instance_admin.PartialUpdateClusterRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_cluster(self) -> Callable[ + [bigtable_instance_admin.DeleteClusterRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_app_profile(self) -> Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], + Union[ + instance.AppProfile, + Awaitable[instance.AppProfile] + ]]: + raise NotImplementedError() + + @property + def get_app_profile(self) -> Callable[ + [bigtable_instance_admin.GetAppProfileRequest], + Union[ + instance.AppProfile, + Awaitable[instance.AppProfile] + ]]: + raise NotImplementedError() + + @property + def list_app_profiles(self) -> Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + Union[ + bigtable_instance_admin.ListAppProfilesResponse, + Awaitable[bigtable_instance_admin.ListAppProfilesResponse] + ]]: + raise NotImplementedError() + + @property + def update_app_profile(self) -> Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], + Union[ + 
operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_app_profile(self) -> Callable[ + [bigtable_instance_admin.DeleteAppProfileRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[ + policy_pb2.Policy, + Awaitable[policy_pb2.Policy] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[ + policy_pb2.Policy, + Awaitable[policy_pb2.Policy] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse] + ]]: + raise NotImplementedError() + + @property + def list_hot_tablets(self) -> Callable[ + [bigtable_instance_admin.ListHotTabletsRequest], + Union[ + bigtable_instance_admin.ListHotTabletsResponse, + Awaitable[bigtable_instance_admin.ListHotTabletsResponse] + ]]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'BigtableInstanceAdminTransport', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py new file mode 100644 index 000000000..d0143d0b8 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -0,0 +1,849 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO + + +class BigtableInstanceAdminGrpcTransport(BigtableInstanceAdminTransport): + """gRPC backend transport for BigtableInstanceAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. 
Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'bigtableadmin.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. 
+ google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'bigtableadmin.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. 
+ Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_instance(self) -> Callable[ + [bigtable_instance_admin.CreateInstanceRequest], + operations_pb2.Operation]: + r"""Return a callable for the create instance method over gRPC. + + Create an instance within a project. + + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + + Returns: + Callable[[~.CreateInstanceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_instance' not in self._stubs: + self._stubs['create_instance'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance', + request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_instance'] + + @property + def get_instance(self) -> Callable[ + [bigtable_instance_admin.GetInstanceRequest], + instance.Instance]: + r"""Return a callable for the get instance method over gRPC. + + Gets information about an instance. + + Returns: + Callable[[~.GetInstanceRequest], + ~.Instance]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_instance' not in self._stubs: + self._stubs['get_instance'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance', + request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs['get_instance'] + + @property + def list_instances(self) -> Callable[ + [bigtable_instance_admin.ListInstancesRequest], + bigtable_instance_admin.ListInstancesResponse]: + r"""Return a callable for the list instances method over gRPC. + + Lists information about instances in a project. 
+ + Returns: + Callable[[~.ListInstancesRequest], + ~.ListInstancesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_instances' not in self._stubs: + self._stubs['list_instances'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', + request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, + ) + return self._stubs['list_instances'] + + @property + def update_instance(self) -> Callable[ + [instance.Instance], + instance.Instance]: + r"""Return a callable for the update instance method over gRPC. + + Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Returns: + Callable[[~.Instance], + ~.Instance]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_instance' not in self._stubs: + self._stubs['update_instance'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', + request_serializer=instance.Instance.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs['update_instance'] + + @property + def partial_update_instance(self) -> Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], + operations_pb2.Operation]: + r"""Return a callable for the partial update instance method over gRPC. + + Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Returns: + Callable[[~.PartialUpdateInstanceRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'partial_update_instance' not in self._stubs: + self._stubs['partial_update_instance'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance', + request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['partial_update_instance'] + + @property + def delete_instance(self) -> Callable[ + [bigtable_instance_admin.DeleteInstanceRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete instance method over gRPC. + + Delete an instance from a project. + + Returns: + Callable[[~.DeleteInstanceRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
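+        # (Editorial note: the stub built below is cached in ``self._stubs``,
+        # so repeated accesses to this property reuse one bound gRPC callable
+        # rather than recreating it on every call.)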
+ if 'delete_instance' not in self._stubs: + self._stubs['delete_instance'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', + request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_instance'] + + @property + def create_cluster(self) -> Callable[ + [bigtable_instance_admin.CreateClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster within an instance. + + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + + Returns: + Callable[[~.CreateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', + request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_cluster'] + + @property + def get_cluster(self) -> Callable[ + [bigtable_instance_admin.GetClusterRequest], + instance.Cluster]: + r"""Return a callable for the get cluster method over gRPC. + + Gets information about a cluster. + + Returns: + Callable[[~.GetClusterRequest], + ~.Cluster]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', + request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, + response_deserializer=instance.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def list_clusters(self) -> Callable[ + [bigtable_instance_admin.ListClustersRequest], + bigtable_instance_admin.ListClustersResponse]: + r"""Return a callable for the list clusters method over gRPC. + + Lists information about clusters in an instance. + + Returns: + Callable[[~.ListClustersRequest], + ~.ListClustersResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
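+        # (Editorial note: the string passed to ``unary_unary`` below is the
+        # full gRPC method path, ``/<package>.<Service>/<Method>``; it must
+        # match the BigtableInstanceAdmin service definition exactly.)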
+ if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', + request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, + response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def update_cluster(self) -> Callable[ + [instance.Cluster], + operations_pb2.Operation]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster within an instance. + + Note that UpdateCluster does not support updating + cluster_config.cluster_autoscaling_config. In order to update + it, you must use PartialUpdateCluster. + + Returns: + Callable[[~.Cluster], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', + request_serializer=instance.Cluster.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_cluster'] + + @property + def partial_update_cluster(self) -> Callable[ + [bigtable_instance_admin.PartialUpdateClusterRequest], + operations_pb2.Operation]: + r"""Return a callable for the partial update cluster method over gRPC. + + Partially updates a cluster within a project. This method is the + preferred way to update a Cluster. + + To enable and update autoscaling, set + cluster_config.cluster_autoscaling_config. When autoscaling is + enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning + that updates to it are ignored. Note that an update cannot + simultaneously set serve_nodes to non-zero and + cluster_config.cluster_autoscaling_config to non-empty, and also + specify both in the update_mask. + + To disable autoscaling, clear + cluster_config.cluster_autoscaling_config, and explicitly set a + serve_node count via the update_mask. + + Returns: + Callable[[~.PartialUpdateClusterRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'partial_update_cluster' not in self._stubs: + self._stubs['partial_update_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster', + request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['partial_update_cluster'] + + @property + def delete_cluster(self) -> Callable[ + [bigtable_instance_admin.DeleteClusterRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster from an instance. + + Returns: + Callable[[~.DeleteClusterRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_cluster' not in self._stubs: + self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', + request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_cluster'] + + @property + def create_app_profile(self) -> Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], + instance.AppProfile]: + r"""Return a callable for the create app profile method over gRPC. + + Creates an app profile within an instance. + + Returns: + Callable[[~.CreateAppProfileRequest], + ~.AppProfile]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_app_profile' not in self._stubs: + self._stubs['create_app_profile'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile', + request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs['create_app_profile'] + + @property + def get_app_profile(self) -> Callable[ + [bigtable_instance_admin.GetAppProfileRequest], + instance.AppProfile]: + r"""Return a callable for the get app profile method over gRPC. + + Gets information about an app profile. + + Returns: + Callable[[~.GetAppProfileRequest], + ~.AppProfile]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_app_profile' not in self._stubs: + self._stubs['get_app_profile'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile', + request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs['get_app_profile'] + + @property + def list_app_profiles(self) -> Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + bigtable_instance_admin.ListAppProfilesResponse]: + r"""Return a callable for the list app profiles method over gRPC. + + Lists information about app profiles in an instance. + + Returns: + Callable[[~.ListAppProfilesRequest], + ~.ListAppProfilesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
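+        # Illustrative sketch: this transport-level callable is unpaged;
+        # callers (normally the client's pager) loop on `next_page_token`.
+        # Field names follow the standard List* convention; the parent is
+        # a placeholder.
+        #
+        #   request = bigtable_instance_admin.ListAppProfilesRequest(
+        #       parent="projects/my-project/instances/my-instance",
+        #   )
+        #   while True:
+        #       response = transport.list_app_profiles(request)
+        #       for app_profile in response.app_profiles:
+        #           ...
+        #       if not response.next_page_token:
+        #           break
+        #       request.page_token = response.next_page_token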
+ if 'list_app_profiles' not in self._stubs: + self._stubs['list_app_profiles'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles', + request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, + ) + return self._stubs['list_app_profiles'] + + @property + def update_app_profile(self) -> Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], + operations_pb2.Operation]: + r"""Return a callable for the update app profile method over gRPC. + + Updates an app profile within an instance. + + Returns: + Callable[[~.UpdateAppProfileRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_app_profile' not in self._stubs: + self._stubs['update_app_profile'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile', + request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_app_profile'] + + @property + def delete_app_profile(self) -> Callable[ + [bigtable_instance_admin.DeleteAppProfileRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete app profile method over gRPC. + + Deletes an app profile from an instance. + + Returns: + Callable[[~.DeleteAppProfileRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_app_profile' not in self._stubs: + self._stubs['delete_app_profile'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile', + request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_app_profile'] + + @property + def get_iam_policy(self) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_iam_policy' not in self._stubs: + self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy', + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs['get_iam_policy'] + + @property + def set_iam_policy(self) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. 
+ + Sets the access control policy on an instance + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_iam_policy' not in self._stubs: + self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy', + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs['set_iam_policy'] + + @property + def test_iam_permissions(self) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified instance resource. + + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'test_iam_permissions' not in self._stubs: + self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions', + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs['test_iam_permissions'] + + @property + def list_hot_tablets(self) -> Callable[ + [bigtable_instance_admin.ListHotTabletsRequest], + bigtable_instance_admin.ListHotTabletsResponse]: + r"""Return a callable for the list hot tablets method over gRPC. + + Lists hot tablets in a cluster, within the time range + provided. Hot tablets are ordered based on CPU usage. + + Returns: + Callable[[~.ListHotTabletsRequest], + ~.ListHotTabletsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
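+        # Note the two serializer conventions in this module: the IAM stubs
+        # above use the raw protobuf `SerializeToString` / `FromString` pair,
+        # while the Bigtable admin types are proto-plus messages that expose
+        # `serialize` / `deserialize` wrappers.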
+ if 'list_hot_tablets' not in self._stubs: + self._stubs['list_hot_tablets'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets', + request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, + ) + return self._stubs['list_hot_tablets'] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'BigtableInstanceAdminGrpcTransport', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py new file mode 100644 index 000000000..b44fb1ff5 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -0,0 +1,848 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import warnings +from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers_async +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore +from grpc.experimental import aio # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +from .grpc import BigtableInstanceAdminGrpcTransport + + +class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport): + """gRPC AsyncIO backend transport for BigtableInstanceAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. 
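+
+    A minimal usage sketch (assuming application default credentials are
+    available; the resource name is a placeholder, and the call must run
+    inside a coroutine):
+
+    .. code-block:: python
+
+        transport = BigtableInstanceAdminGrpcAsyncIOTransport()
+        request = bigtable_instance_admin.GetInstanceRequest(
+            name="projects/my-project/instances/my-instance",
+        )
+        result = await transport.get_instance(request)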
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'bigtableadmin.googleapis.com',
+                       credentials: Optional[ga_credentials.Credentials] = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
+        """
+
+        return grpc_helpers_async.create_channel(
+            host,
+            credentials=credentials,
+            credentials_file=credentials_file,
+            quota_project_id=quota_project_id,
+            default_scopes=cls.AUTH_SCOPES,
+            scopes=scopes,
+            default_host=cls.DEFAULT_HOST,
+            **kwargs
+        )
+
+    def __init__(self, *,
+            host: str = 'bigtableadmin.googleapis.com',
+            credentials: Optional[ga_credentials.Credentials] = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            channel: Optional[aio.Channel] = None,
+            api_mtls_endpoint: Optional[str] = None,
+            client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+            ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
+            client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            api_audience: Optional[str] = None,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+                This argument is ignored if ``channel`` is provided.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            channel (Optional[aio.Channel]): A ``Channel`` instance through
+                which to make calls.
+            api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
+                If provided, it overrides the ``host`` argument and tries to create
+                a mutual TLS channel with client SSL credentials from
+                ``client_cert_source`` or application default SSL credentials.
+ client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
+        # This must be done after self._grpc_channel exists
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def grpc_channel(self) -> aio.Channel:
+        """Create the channel designed to connect to this service.
+
+        This property caches on the instance; repeated calls return
+        the same channel.
+        """
+        # Return the channel from cache.
+        return self._grpc_channel
+
+    @property
+    def operations_client(self) -> operations_v1.OperationsAsyncClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Quick check: Only create a new client if we do not already have one.
+        if self._operations_client is None:
+            self._operations_client = operations_v1.OperationsAsyncClient(
+                self.grpc_channel
+            )
+
+        # Return the client from cache.
+        return self._operations_client
+
+    @property
+    def create_instance(self) -> Callable[
+            [bigtable_instance_admin.CreateInstanceRequest],
+            Awaitable[operations_pb2.Operation]]:
+        r"""Return a callable for the create instance method over gRPC.
+
+        Create an instance within a project.
+
+        Note that exactly one of Cluster.serve_nodes and
+        Cluster.cluster_config.cluster_autoscaling_config can be set. If
+        serve_nodes is set to non-zero, then the cluster is manually
+        scaled. If cluster_config.cluster_autoscaling_config is
+        non-empty, then autoscaling is enabled.
+
+        Returns:
+            Callable[[~.CreateInstanceRequest],
+                    Awaitable[~.Operation]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'create_instance' not in self._stubs:
+            self._stubs['create_instance'] = self.grpc_channel.unary_unary(
+                '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance',
+                request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize,
+                response_deserializer=operations_pb2.Operation.FromString,
+            )
+        return self._stubs['create_instance']
+
+    @property
+    def get_instance(self) -> Callable[
+            [bigtable_instance_admin.GetInstanceRequest],
+            Awaitable[instance.Instance]]:
+        r"""Return a callable for the get instance method over gRPC.
+
+        Gets information about an instance.
+
+        Returns:
+            Callable[[~.GetInstanceRequest],
+                    Awaitable[~.Instance]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'get_instance' not in self._stubs:
+            self._stubs['get_instance'] = self.grpc_channel.unary_unary(
+                '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance',
+                request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize,
+                response_deserializer=instance.Instance.deserialize,
+            )
+        return self._stubs['get_instance']
+
+    @property
+    def list_instances(self) -> Callable[
+            [bigtable_instance_admin.ListInstancesRequest],
+            Awaitable[bigtable_instance_admin.ListInstancesResponse]]:
+        r"""Return a callable for the list instances method over gRPC.
+
+        Lists information about instances in a project.
+
+        Returns:
+            Callable[[~.ListInstancesRequest],
+                    Awaitable[~.ListInstancesResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_instances' not in self._stubs: + self._stubs['list_instances'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', + request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, + ) + return self._stubs['list_instances'] + + @property + def update_instance(self) -> Callable[ + [instance.Instance], + Awaitable[instance.Instance]]: + r"""Return a callable for the update instance method over gRPC. + + Updates an instance within a project. This method + updates only the display name and type for an Instance. + To update other Instance properties, such as labels, use + PartialUpdateInstance. + + Returns: + Callable[[~.Instance], + Awaitable[~.Instance]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_instance' not in self._stubs: + self._stubs['update_instance'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', + request_serializer=instance.Instance.serialize, + response_deserializer=instance.Instance.deserialize, + ) + return self._stubs['update_instance'] + + @property + def partial_update_instance(self) -> Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the partial update instance method over gRPC. + + Partially updates an instance within a project. This + method can modify all fields of an Instance and is the + preferred way to update an Instance. + + Returns: + Callable[[~.PartialUpdateInstanceRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'partial_update_instance' not in self._stubs: + self._stubs['partial_update_instance'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance', + request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['partial_update_instance'] + + @property + def delete_instance(self) -> Callable[ + [bigtable_instance_admin.DeleteInstanceRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete instance method over gRPC. + + Delete an instance from a project. + + Returns: + Callable[[~.DeleteInstanceRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
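+        # Unlike the sync transport, this callable returns an awaitable
+        # (a grpc.aio unary-unary call); for example, inside a coroutine:
+        #
+        #   await transport.delete_instance(request)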
+ if 'delete_instance' not in self._stubs: + self._stubs['delete_instance'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', + request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_instance'] + + @property + def create_cluster(self) -> Callable[ + [bigtable_instance_admin.CreateClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create cluster method over gRPC. + + Creates a cluster within an instance. + + Note that exactly one of Cluster.serve_nodes and + Cluster.cluster_config.cluster_autoscaling_config can be set. If + serve_nodes is set to non-zero, then the cluster is manually + scaled. If cluster_config.cluster_autoscaling_config is + non-empty, then autoscaling is enabled. + + Returns: + Callable[[~.CreateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_cluster' not in self._stubs: + self._stubs['create_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', + request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_cluster'] + + @property + def get_cluster(self) -> Callable[ + [bigtable_instance_admin.GetClusterRequest], + Awaitable[instance.Cluster]]: + r"""Return a callable for the get cluster method over gRPC. + + Gets information about a cluster. + + Returns: + Callable[[~.GetClusterRequest], + Awaitable[~.Cluster]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_cluster' not in self._stubs: + self._stubs['get_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', + request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, + response_deserializer=instance.Cluster.deserialize, + ) + return self._stubs['get_cluster'] + + @property + def list_clusters(self) -> Callable[ + [bigtable_instance_admin.ListClustersRequest], + Awaitable[bigtable_instance_admin.ListClustersResponse]]: + r"""Return a callable for the list clusters method over gRPC. + + Lists information about clusters in an instance. + + Returns: + Callable[[~.ListClustersRequest], + Awaitable[~.ListClustersResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
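+        # Sketch only: callables typed `Awaitable[operations_pb2.Operation]`
+        # (create_instance, create_cluster, the partial updates) return the
+        # raw long-running operation; `self.operations_client` can poll it,
+        # assuming the standard OperationsAsyncClient surface:
+        #
+        #   operation = await transport.create_cluster(request)
+        #   latest = await transport.operations_client.get_operation(
+        #       name=operation.name)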
+ if 'list_clusters' not in self._stubs: + self._stubs['list_clusters'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', + request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, + response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, + ) + return self._stubs['list_clusters'] + + @property + def update_cluster(self) -> Callable[ + [instance.Cluster], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update cluster method over gRPC. + + Updates a cluster within an instance. + + Note that UpdateCluster does not support updating + cluster_config.cluster_autoscaling_config. In order to update + it, you must use PartialUpdateCluster. + + Returns: + Callable[[~.Cluster], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_cluster' not in self._stubs: + self._stubs['update_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', + request_serializer=instance.Cluster.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_cluster'] + + @property + def partial_update_cluster(self) -> Callable[ + [bigtable_instance_admin.PartialUpdateClusterRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the partial update cluster method over gRPC. + + Partially updates a cluster within a project. This method is the + preferred way to update a Cluster. + + To enable and update autoscaling, set + cluster_config.cluster_autoscaling_config. When autoscaling is + enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning + that updates to it are ignored. Note that an update cannot + simultaneously set serve_nodes to non-zero and + cluster_config.cluster_autoscaling_config to non-empty, and also + specify both in the update_mask. + + To disable autoscaling, clear + cluster_config.cluster_autoscaling_config, and explicitly set a + serve_node count via the update_mask. + + Returns: + Callable[[~.PartialUpdateClusterRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'partial_update_cluster' not in self._stubs: + self._stubs['partial_update_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster', + request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['partial_update_cluster'] + + @property + def delete_cluster(self) -> Callable[ + [bigtable_instance_admin.DeleteClusterRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete cluster method over gRPC. + + Deletes a cluster from an instance. + + Returns: + Callable[[~.DeleteClusterRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_cluster' not in self._stubs: + self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', + request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_cluster'] + + @property + def create_app_profile(self) -> Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], + Awaitable[instance.AppProfile]]: + r"""Return a callable for the create app profile method over gRPC. + + Creates an app profile within an instance. + + Returns: + Callable[[~.CreateAppProfileRequest], + Awaitable[~.AppProfile]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_app_profile' not in self._stubs: + self._stubs['create_app_profile'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile', + request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs['create_app_profile'] + + @property + def get_app_profile(self) -> Callable[ + [bigtable_instance_admin.GetAppProfileRequest], + Awaitable[instance.AppProfile]]: + r"""Return a callable for the get app profile method over gRPC. + + Gets information about an app profile. + + Returns: + Callable[[~.GetAppProfileRequest], + Awaitable[~.AppProfile]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_app_profile' not in self._stubs: + self._stubs['get_app_profile'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile', + request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, + response_deserializer=instance.AppProfile.deserialize, + ) + return self._stubs['get_app_profile'] + + @property + def list_app_profiles(self) -> Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + Awaitable[bigtable_instance_admin.ListAppProfilesResponse]]: + r"""Return a callable for the list app profiles method over gRPC. + + Lists information about app profiles in an instance. + + Returns: + Callable[[~.ListAppProfilesRequest], + Awaitable[~.ListAppProfilesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
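+        # Illustrative sketch: the awaitable callables compose with asyncio
+        # primitives, e.g. listing app profiles for several instances
+        # concurrently (`parents` is a placeholder iterable of resource names):
+        #
+        #   responses = await asyncio.gather(*(
+        #       transport.list_app_profiles(
+        #           bigtable_instance_admin.ListAppProfilesRequest(parent=p))
+        #       for p in parents))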
+ if 'list_app_profiles' not in self._stubs: + self._stubs['list_app_profiles'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles', + request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, + response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, + ) + return self._stubs['list_app_profiles'] + + @property + def update_app_profile(self) -> Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update app profile method over gRPC. + + Updates an app profile within an instance. + + Returns: + Callable[[~.UpdateAppProfileRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_app_profile' not in self._stubs: + self._stubs['update_app_profile'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile', + request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_app_profile'] + + @property + def delete_app_profile(self) -> Callable[ + [bigtable_instance_admin.DeleteAppProfileRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete app profile method over gRPC. + + Deletes an app profile from an instance. + + Returns: + Callable[[~.DeleteAppProfileRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_app_profile' not in self._stubs: + self._stubs['delete_app_profile'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile', + request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_app_profile'] + + @property + def get_iam_policy(self) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Awaitable[policy_pb2.Policy]]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for an instance + resource. Returns an empty policy if an instance exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
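+        # Note: IAM requests address the target via `resource`, not `name`;
+        # an illustrative call (the resource is a placeholder):
+        #
+        #   policy = await transport.get_iam_policy(
+        #       iam_policy_pb2.GetIamPolicyRequest(
+        #           resource="projects/my-project/instances/my-instance"))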
+ if 'get_iam_policy' not in self._stubs: + self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy', + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs['get_iam_policy'] + + @property + def set_iam_policy(self) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Awaitable[policy_pb2.Policy]]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on an instance + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_iam_policy' not in self._stubs: + self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy', + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs['set_iam_policy'] + + @property + def test_iam_permissions(self) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Awaitable[iam_policy_pb2.TestIamPermissionsResponse]]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified instance resource. + + Returns: + Callable[[~.TestIamPermissionsRequest], + Awaitable[~.TestIamPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'test_iam_permissions' not in self._stubs: + self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions', + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs['test_iam_permissions'] + + @property + def list_hot_tablets(self) -> Callable[ + [bigtable_instance_admin.ListHotTabletsRequest], + Awaitable[bigtable_instance_admin.ListHotTabletsResponse]]: + r"""Return a callable for the list hot tablets method over gRPC. + + Lists hot tablets in a cluster, within the time range + provided. Hot tablets are ordered based on CPU usage. + + Returns: + Callable[[~.ListHotTabletsRequest], + Awaitable[~.ListHotTabletsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
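+        # Illustrative sketch (assuming the request carries `parent`,
+        # `start_time`, and `end_time` per the proto; values are placeholders):
+        #
+        #   request = bigtable_instance_admin.ListHotTabletsRequest(
+        #       parent="projects/p/instances/i/clusters/c",
+        #       start_time=start, end_time=end,
+        #   )
+        #   response = await transport.list_hot_tablets(request)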
+ if 'list_hot_tablets' not in self._stubs: + self._stubs['list_hot_tablets'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets', + request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, + response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, + ) + return self._stubs['list_hot_tablets'] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ( + 'BigtableInstanceAdminGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py new file mode 100644 index 000000000..c025f8b07 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -0,0 +1,2776 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class BigtableInstanceAdminRestInterceptor: + """Interceptor for BigtableInstanceAdmin. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+ Example use cases include: + * Logging + * Verifying requests according to service or custom semantics + * Stripping extraneous information from responses + + These use cases and more can be enabled by injecting an + instance of a custom subclass when constructing the BigtableInstanceAdminRestTransport. + + .. code-block:: python + class MyCustomBigtableInstanceAdminInterceptor(BigtableInstanceAdminRestInterceptor): + def pre_create_app_profile(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_app_profile(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_create_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_create_instance(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_delete_app_profile(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_delete_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def pre_get_app_profile(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_app_profile(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_iam_policy(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_iam_policy(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_get_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_get_instance(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_app_profiles(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_app_profiles(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_clusters(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_clusters(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_hot_tablets(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_hot_tablets(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_list_instances(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_list_instances(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_partial_update_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata 
+ + def post_partial_update_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_partial_update_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_partial_update_instance(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_set_iam_policy(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_set_iam_policy(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_test_iam_permissions(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_test_iam_permissions(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_app_profile(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_app_profile(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_cluster(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_cluster(self, response): + logging.log(f"Received response: {response}") + return response + + def pre_update_instance(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_update_instance(self, response): + logging.log(f"Received response: {response}") + return response + + transport = BigtableInstanceAdminRestTransport(interceptor=MyCustomBigtableInstanceAdminInterceptor()) + client = BigtableInstanceAdminClient(transport=transport) + + + """ + def pre_create_app_profile(self, request: bigtable_instance_admin.CreateAppProfileRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.CreateAppProfileRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_app_profile + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_create_app_profile(self, response: instance.AppProfile) -> instance.AppProfile: + """Post-rpc interceptor for create_app_profile + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_create_cluster(self, request: bigtable_instance_admin.CreateClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.CreateClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_create_cluster(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for create_cluster + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. 
+ """ + return response + def pre_create_instance(self, request: bigtable_instance_admin.CreateInstanceRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.CreateInstanceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_create_instance(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for create_instance + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_delete_app_profile(self, request: bigtable_instance_admin.DeleteAppProfileRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.DeleteAppProfileRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_app_profile + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def pre_delete_cluster(self, request: bigtable_instance_admin.DeleteClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.DeleteClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def pre_delete_instance(self, request: bigtable_instance_admin.DeleteInstanceRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.DeleteInstanceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def pre_get_app_profile(self, request: bigtable_instance_admin.GetAppProfileRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.GetAppProfileRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_app_profile + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_app_profile(self, response: instance.AppProfile) -> instance.AppProfile: + """Post-rpc interceptor for get_app_profile + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_get_cluster(self, request: bigtable_instance_admin.GetClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.GetClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_cluster(self, response: instance.Cluster) -> instance.Cluster: + """Post-rpc interceptor for get_cluster + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. 
+ """ + return response + def pre_get_iam_policy(self, request: iam_policy_pb2.GetIamPolicyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_get_instance(self, request: bigtable_instance_admin.GetInstanceRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.GetInstanceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_get_instance(self, response: instance.Instance) -> instance.Instance: + """Post-rpc interceptor for get_instance + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_list_app_profiles(self, request: bigtable_instance_admin.ListAppProfilesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.ListAppProfilesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_app_profiles + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_app_profiles(self, response: bigtable_instance_admin.ListAppProfilesResponse) -> bigtable_instance_admin.ListAppProfilesResponse: + """Post-rpc interceptor for list_app_profiles + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_list_clusters(self, request: bigtable_instance_admin.ListClustersRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.ListClustersRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_clusters + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_clusters(self, response: bigtable_instance_admin.ListClustersResponse) -> bigtable_instance_admin.ListClustersResponse: + """Post-rpc interceptor for list_clusters + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_list_hot_tablets(self, request: bigtable_instance_admin.ListHotTabletsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.ListHotTabletsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_hot_tablets + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. 
+ """ + return request, metadata + + def post_list_hot_tablets(self, response: bigtable_instance_admin.ListHotTabletsResponse) -> bigtable_instance_admin.ListHotTabletsResponse: + """Post-rpc interceptor for list_hot_tablets + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_list_instances(self, request: bigtable_instance_admin.ListInstancesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.ListInstancesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_instances + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_list_instances(self, response: bigtable_instance_admin.ListInstancesResponse) -> bigtable_instance_admin.ListInstancesResponse: + """Post-rpc interceptor for list_instances + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_partial_update_cluster(self, request: bigtable_instance_admin.PartialUpdateClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.PartialUpdateClusterRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for partial_update_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_partial_update_cluster(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for partial_update_cluster + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_partial_update_instance(self, request: bigtable_instance_admin.PartialUpdateInstanceRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.PartialUpdateInstanceRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for partial_update_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_partial_update_instance(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for partial_update_instance + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_set_iam_policy(self, request: iam_policy_pb2.SetIamPolicyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. 
+ """ + return response + def pre_test_iam_permissions(self, request: iam_policy_pb2.TestIamPermissionsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_test_iam_permissions(self, response: iam_policy_pb2.TestIamPermissionsResponse) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_update_app_profile(self, request: bigtable_instance_admin.UpdateAppProfileRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.UpdateAppProfileRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_app_profile + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_app_profile(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for update_app_profile + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_update_cluster(self, request: instance.Cluster, metadata: Sequence[Tuple[str, str]]) -> Tuple[instance.Cluster, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_cluster + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_cluster(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for update_cluster + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + def pre_update_instance(self, request: instance.Instance, metadata: Sequence[Tuple[str, str]]) -> Tuple[instance.Instance, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_instance + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableInstanceAdmin server. + """ + return request, metadata + + def post_update_instance(self, response: instance.Instance) -> instance.Instance: + """Post-rpc interceptor for update_instance + + Override in a subclass to manipulate the response + after it is returned by the BigtableInstanceAdmin server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class BigtableInstanceAdminRestStub: + _session: AuthorizedSession + _host: str + _interceptor: BigtableInstanceAdminRestInterceptor + + +class BigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): + """REST backend transport for BigtableInstanceAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable Instances and Clusters. Provides access to the Instance + and Cluster schemas only, not the tables' metadata or data + stored in those tables. 
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends JSON representations of protocol buffers over HTTP/1.1
+
+    """
+
+    def __init__(self, *,
+            host: str = 'bigtableadmin.googleapis.com',
+            credentials: Optional[ga_credentials.Credentials] = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            client_cert_source_for_mtls: Optional[Callable[[
+                ], Tuple[bytes, bytes]]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            url_scheme: str = 'https',
+            interceptor: Optional[BigtableInstanceAdminRestInterceptor] = None,
+            api_audience: Optional[str] = None,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional(Sequence[str])): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+                certificate to configure mutual TLS HTTP channel. It is ignored
+                if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                "https", but for testing or local servers,
+                "http" can be specified.
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(f"Unexpected hostname structure: {host}")  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST)
+        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or BigtableInstanceAdminRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def operations_client(self) -> operations_v1.AbstractOperationsClient:
+        """Create the client designed to process long-running operations.
+ + This property caches on the instance; repeated calls return the same + client. + """ + # Only create a new client if we do not already have one. + if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + 'google.longrunning.Operations.CancelOperation': [ + { + 'method': 'post', + 'uri': '/v2/{name=operations/**}:cancel', + }, + ], + 'google.longrunning.Operations.DeleteOperation': [ + { + 'method': 'delete', + 'uri': '/v2/{name=operations/**}', + }, + ], + 'google.longrunning.Operations.GetOperation': [ + { + 'method': 'get', + 'uri': '/v2/{name=operations/**}', + }, + ], + 'google.longrunning.Operations.ListOperations': [ + { + 'method': 'get', + 'uri': '/v2/{name=operations/projects/**}/operations', + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v2") + + self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) + + # Return the client from cache. + return self._operations_client + + class _CreateAppProfile(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("CreateAppProfile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "appProfileId" : "", } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.CreateAppProfileRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> instance.AppProfile: + r"""Call the create app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.CreateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateAppProfile. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.instance.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{parent=projects/*/instances/*}/appProfiles', + 'body': 'app_profile', + }, + ] + request, metadata = self._interceptor.pre_create_app_profile(request, metadata) + pb_request = bigtable_instance_admin.CreateAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.AppProfile() + pb_resp = instance.AppProfile.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_app_profile(resp) + return resp + + class _CreateCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("CreateCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "clusterId" : "", } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.CreateClusterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the create cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.CreateClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{parent=projects/*/instances/*}/clusters', + 'body': 'cluster', + }, + ] + request, metadata = self._interceptor.pre_create_cluster(request, metadata) + pb_request = bigtable_instance_admin.CreateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_cluster(resp) + return resp + + class _CreateInstance(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("CreateInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.CreateInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the create instance method over HTTP. + + Args: + request (~.bigtable_instance_admin.CreateInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.CreateInstance. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{parent=projects/*}/instances', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_create_instance(request, metadata) + pb_request = bigtable_instance_admin.CreateInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_instance(resp) + return resp + + class _DeleteAppProfile(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("DeleteAppProfile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "ignoreWarnings" : False, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.DeleteAppProfileRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ): + r"""Call the delete app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteAppProfile. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v2/{name=projects/*/instances/*/appProfiles/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_app_profile(request, metadata) + pb_request = bigtable_instance_admin.DeleteAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("DeleteCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.DeleteClusterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ): + r"""Call the delete cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v2/{name=projects/*/instances/*/clusters/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_cluster(request, metadata) + pb_request = bigtable_instance_admin.DeleteClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteInstance(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("DeleteInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.DeleteInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ): + r"""Call the delete instance method over HTTP. + + Args: + request (~.bigtable_instance_admin.DeleteInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.DeleteInstance. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v2/{name=projects/*/instances/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_instance(request, metadata) + pb_request = bigtable_instance_admin.DeleteInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GetAppProfile(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("GetAppProfile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.GetAppProfileRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> instance.AppProfile: + r"""Call the get app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.GetAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.GetAppProfile. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.instance.AppProfile: + A configuration object describing how + Cloud Bigtable should treat traffic from + a particular end user application. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{name=projects/*/instances/*/appProfiles/*}', + }, + ] + request, metadata = self._interceptor.pre_get_app_profile(request, metadata) + pb_request = bigtable_instance_admin.GetAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.AppProfile() + pb_resp = instance.AppProfile.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_app_profile(resp) + return resp + + class _GetCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("GetCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.GetClusterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> instance.Cluster: + r"""Call the get cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.GetClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.GetCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.instance.Cluster: + A resizable group of nodes in a particular cloud + location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{name=projects/*/instances/*/clusters/*}', + }, + ] + request, metadata = self._interceptor.pre_get_cluster(request, metadata) + pb_request = bigtable_instance_admin.GetClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.Cluster() + pb_resp = instance.Cluster.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_cluster(resp) + return resp + + class _GetIamPolicy(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("GetIamPolicy") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: iam_policy_pb2.GetIamPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> policy_pb2.Policy: + r"""Call the get iam policy method over HTTP. + + Args: + request (~.iam_policy_pb2.GetIamPolicyRequest): + The request object. Request message for ``GetIamPolicy`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members``, or + principals, to a single ``role``. Principals can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A ``role`` is a named list of + permissions; each ``role`` can be an IAM predefined role + or a user-created custom role. + + For some types of Google Cloud resources, a ``binding`` + can also specify a ``condition``, which is a logical + expression that allows access to a resource only if the + expression evaluates to ``true``. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. 
+
+                    **JSON example:**
+
+                    ::
+
+                       {
+                         "bindings": [
+                           {
+                             "role": "roles/resourcemanager.organizationAdmin",
+                             "members": [
+                               "user:mike@example.com",
+                               "group:admins@example.com",
+                               "domain:google.com",
+                               "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                             ]
+                           },
+                           {
+                             "role": "roles/resourcemanager.organizationViewer",
+                             "members": [
+                               "user:eve@example.com"
+                             ],
+                             "condition": {
+                               "title": "expirable access",
+                               "description": "Does not grant access after Sep 2020",
+                               "expression": "request.time <
+                               timestamp('2020-10-01T00:00:00.000Z')",
+                             }
+                           }
+                         ],
+                         "etag": "BwWWja0YfJA=",
+                         "version": 3
+                       }
+
+                    **YAML example:**
+
+                    ::
+
+                       bindings:
+                       - members:
+                         - user:mike@example.com
+                         - group:admins@example.com
+                         - domain:google.com
+                         - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                         role: roles/resourcemanager.organizationAdmin
+                       - members:
+                         - user:eve@example.com
+                         role: roles/resourcemanager.organizationViewer
+                         condition:
+                           title: expirable access
+                           description: Does not grant access after Sep 2020
+                           expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+                       etag: BwWWja0YfJA=
+                       version: 3
+
+                    For a description of IAM and its features, see the `IAM
+                    documentation <https://cloud.google.com/iam/docs/>`__.
+
+            """
+
+            http_options: List[Dict[str, str]] = [{
+                'method': 'post',
+                'uri': '/v2/{resource=projects/*/instances/*}:getIamPolicy',
+                'body': '*',
+            },
+            ]
+            request, metadata = self._interceptor.pre_get_iam_policy(request, metadata)
+            pb_request = request
+            transcoded_request = path_template.transcode(http_options, pb_request)
+
+            # Jsonify the request body
+
+            body = json_format.MessageToJson(
+                transcoded_request['body'],
+                including_default_value_fields=False,
+                use_integers_for_enums=True
+            )
+            uri = transcoded_request['uri']
+            method = transcoded_request['method']
+
+            # Jsonify the query params
+            query_params = json.loads(json_format.MessageToJson(
+                transcoded_request['query_params'],
+                including_default_value_fields=False,
+                use_integers_for_enums=True,
+            ))
+            query_params.update(self._get_unset_required_fields(query_params))
+
+            query_params["$alt"] = "json;enum-encoding=int"
+
+            # Send the request
+            headers = dict(metadata)
+            headers['Content-Type'] = 'application/json'
+            response = getattr(self._session, method)(
+                "{host}{uri}".format(host=self._host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+                data=body,
+                )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = policy_pb2.Policy()
+            pb_resp = resp
+
+            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+            resp = self._interceptor.post_get_iam_policy(resp)
+            return resp
+
+    class _GetInstance(BigtableInstanceAdminRestStub):
+        def __hash__(self):
+            return hash("GetInstance")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+        }
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict}
+
+        def __call__(self,
+                request: bigtable_instance_admin.GetInstanceRequest, *,
+                retry: OptionalRetry=gapic_v1.method.DEFAULT,
+                timeout: Optional[float]=None,
+                metadata: Sequence[Tuple[str, str]]=(),
+                ) -> instance.Instance:
+            r"""Call the get instance method over HTTP.
+
+            Args:
+                request (~.bigtable_instance_admin.GetInstanceRequest):
+                    The request object.
Request message for + BigtableInstanceAdmin.GetInstance. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.instance.Instance: + A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{name=projects/*/instances/*}', + }, + ] + request, metadata = self._interceptor.pre_get_instance(request, metadata) + pb_request = bigtable_instance_admin.GetInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.Instance() + pb_resp = instance.Instance.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_instance(resp) + return resp + + class _ListAppProfiles(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("ListAppProfiles") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.ListAppProfilesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable_instance_admin.ListAppProfilesResponse: + r"""Call the list app profiles method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListAppProfilesRequest): + The request object. Request message for + BigtableInstanceAdmin.ListAppProfiles. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_instance_admin.ListAppProfilesResponse: + Response message for + BigtableInstanceAdmin.ListAppProfiles. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{parent=projects/*/instances/*}/appProfiles', + }, + ] + request, metadata = self._interceptor.pre_list_app_profiles(request, metadata) + pb_request = bigtable_instance_admin.ListAppProfilesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListAppProfilesResponse() + pb_resp = bigtable_instance_admin.ListAppProfilesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_app_profiles(resp) + return resp + + class _ListClusters(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("ListClusters") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.ListClustersRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable_instance_admin.ListClustersResponse: + r"""Call the list clusters method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListClustersRequest): + The request object. Request message for + BigtableInstanceAdmin.ListClusters. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_instance_admin.ListClustersResponse: + Response message for + BigtableInstanceAdmin.ListClusters. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{parent=projects/*/instances/*}/clusters', + }, + ] + request, metadata = self._interceptor.pre_list_clusters(request, metadata) + pb_request = bigtable_instance_admin.ListClustersRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListClustersResponse() + pb_resp = bigtable_instance_admin.ListClustersResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_clusters(resp) + return resp + + class _ListHotTablets(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("ListHotTablets") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.ListHotTabletsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable_instance_admin.ListHotTabletsResponse: + r"""Call the list hot tablets method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListHotTabletsRequest): + The request object. Request message for + BigtableInstanceAdmin.ListHotTablets. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_instance_admin.ListHotTabletsResponse: + Response message for + BigtableInstanceAdmin.ListHotTablets. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets', + }, + ] + request, metadata = self._interceptor.pre_list_hot_tablets(request, metadata) + pb_request = bigtable_instance_admin.ListHotTabletsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListHotTabletsResponse() + pb_resp = bigtable_instance_admin.ListHotTabletsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_hot_tablets(resp) + return resp + + class _ListInstances(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("ListInstances") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.ListInstancesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable_instance_admin.ListInstancesResponse: + r"""Call the list instances method over HTTP. + + Args: + request (~.bigtable_instance_admin.ListInstancesRequest): + The request object. Request message for + BigtableInstanceAdmin.ListInstances. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_instance_admin.ListInstancesResponse: + Response message for + BigtableInstanceAdmin.ListInstances. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{parent=projects/*}/instances', + }, + ] + request, metadata = self._interceptor.pre_list_instances(request, metadata) + pb_request = bigtable_instance_admin.ListInstancesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_instance_admin.ListInstancesResponse() + pb_resp = bigtable_instance_admin.ListInstancesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_instances(resp) + return resp + + class _PartialUpdateCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("PartialUpdateCluster") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.PartialUpdateClusterRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the partial update cluster method over HTTP. + + Args: + request (~.bigtable_instance_admin.PartialUpdateClusterRequest): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateCluster. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v2/{cluster.name=projects/*/instances/*/clusters/*}', + 'body': 'cluster', + }, + ] + request, metadata = self._interceptor.pre_partial_update_cluster(request, metadata) + pb_request = bigtable_instance_admin.PartialUpdateClusterRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_cluster(resp) + return resp + + class _PartialUpdateInstance(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("PartialUpdateInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.PartialUpdateInstanceRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the partial update instance method over HTTP. + + Args: + request (~.bigtable_instance_admin.PartialUpdateInstanceRequest): + The request object. Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v2/{instance.name=projects/*/instances/*}', + 'body': 'instance', + }, + ] + request, metadata = self._interceptor.pre_partial_update_instance(request, metadata) + pb_request = bigtable_instance_admin.PartialUpdateInstanceRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_instance(resp) + return resp + + class _SetIamPolicy(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("SetIamPolicy") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: iam_policy_pb2.SetIamPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.iam_policy_pb2.SetIamPolicyRequest): + The request object. Request message for ``SetIamPolicy`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members``, or + principals, to a single ``role``. Principals can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A ``role`` is a named list of + permissions; each ``role`` can be an IAM predefined role + or a user-created custom role. + + For some types of Google Cloud resources, a ``binding`` + can also specify a ``condition``, which is a logical + expression that allows access to a resource only if the + expression evaluates to ``true``. A condition can add + constraints based on attributes of the request, the + resource, or both. 
To learn which resources support
+                conditions in their IAM policies, see the `IAM
+                documentation <https://cloud.google.com/iam/help/conditions/resource-policies>`__.
+
+                **JSON example:**
+
+                ::
+
+                   {
+                     "bindings": [
+                       {
+                         "role": "roles/resourcemanager.organizationAdmin",
+                         "members": [
+                           "user:mike@example.com",
+                           "group:admins@example.com",
+                           "domain:google.com",
+                           "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                         ]
+                       },
+                       {
+                         "role": "roles/resourcemanager.organizationViewer",
+                         "members": [
+                           "user:eve@example.com"
+                         ],
+                         "condition": {
+                           "title": "expirable access",
+                           "description": "Does not grant access after Sep 2020",
+                           "expression": "request.time <
+                           timestamp('2020-10-01T00:00:00.000Z')",
+                         }
+                       }
+                     ],
+                     "etag": "BwWWja0YfJA=",
+                     "version": 3
+                   }
+
+                **YAML example:**
+
+                ::
+
+                   bindings:
+                   - members:
+                     - user:mike@example.com
+                     - group:admins@example.com
+                     - domain:google.com
+                     - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                     role: roles/resourcemanager.organizationAdmin
+                   - members:
+                     - user:eve@example.com
+                     role: roles/resourcemanager.organizationViewer
+                     condition:
+                       title: expirable access
+                       description: Does not grant access after Sep 2020
+                       expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+                   etag: BwWWja0YfJA=
+                   version: 3
+
+                For a description of IAM and its features, see the `IAM
+                documentation <https://cloud.google.com/iam/docs/>`__.
+
+            """
+
+            http_options: List[Dict[str, str]] = [{
+                'method': 'post',
+                'uri': '/v2/{resource=projects/*/instances/*}:setIamPolicy',
+                'body': '*',
+            },
+            ]
+            request, metadata = self._interceptor.pre_set_iam_policy(request, metadata)
+            pb_request = request
+            transcoded_request = path_template.transcode(http_options, pb_request)
+
+            # Jsonify the request body
+
+            body = json_format.MessageToJson(
+                transcoded_request['body'],
+                including_default_value_fields=False,
+                use_integers_for_enums=True
+            )
+            uri = transcoded_request['uri']
+            method = transcoded_request['method']
+
+            # Jsonify the query params
+            query_params = json.loads(json_format.MessageToJson(
+                transcoded_request['query_params'],
+                including_default_value_fields=False,
+                use_integers_for_enums=True,
+            ))
+            query_params.update(self._get_unset_required_fields(query_params))
+
+            query_params["$alt"] = "json;enum-encoding=int"
+
+            # Send the request
+            headers = dict(metadata)
+            headers['Content-Type'] = 'application/json'
+            response = getattr(self._session, method)(
+                "{host}{uri}".format(host=self._host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+                data=body,
+            )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
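+            # (``from_http_response`` picks the subclass from the status code,
+            # e.g. 403 -> PermissionDenied and 404 -> NotFound, falling back to
+            # a generic ``GoogleAPICallError`` for unrecognized codes.)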
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = policy_pb2.Policy() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + class _TestIamPermissions(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("TestIamPermissions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: iam_policy_pb2.TestIamPermissionsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.iam_policy_pb2.TestIamPermissionsRequest): + The request object. Request message for ``TestIamPermissions`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{resource=projects/*/instances/*}:testIamPermissions', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_test_iam_permissions(request, metadata) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = iam_policy_pb2.TestIamPermissionsResponse() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + class _UpdateAppProfile(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("UpdateAppProfile") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_instance_admin.UpdateAppProfileRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the update app profile method over HTTP. + + Args: + request (~.bigtable_instance_admin.UpdateAppProfileRequest): + The request object. Request message for + BigtableInstanceAdmin.UpdateAppProfile. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}', + 'body': 'app_profile', + }, + ] + request, metadata = self._interceptor.pre_update_app_profile(request, metadata) + pb_request = bigtable_instance_admin.UpdateAppProfileRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_app_profile(resp) + return resp + + class _UpdateCluster(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("UpdateCluster") + + def __call__(self, + request: instance.Cluster, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the update cluster method over HTTP. + + Args: + request (~.instance.Cluster): + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all + [Tables][google.bigtable.admin.v2.Table] in the parent + [Instance][google.bigtable.admin.v2.Instance]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'put', + 'uri': '/v2/{name=projects/*/instances/*/clusters/*}', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_update_cluster(request, metadata) + pb_request = instance.Cluster.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_cluster(resp) + return resp + + class _UpdateInstance(BigtableInstanceAdminRestStub): + def __hash__(self): + return hash("UpdateInstance") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: instance.Instance, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> instance.Instance: + r"""Call the update instance method over HTTP. + + Args: + request (~.instance.Instance): + The request object. 
A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.instance.Instance: + A collection of Bigtable + [Tables][google.bigtable.admin.v2.Table] and the + resources that serve them. All tables in an instance are + served from all + [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'put', + 'uri': '/v2/{name=projects/*/instances/*}', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_update_instance(request, metadata) + pb_request = instance.Instance.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = instance.Instance() + pb_resp = instance.Instance.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_instance(resp) + return resp + + @property + def create_app_profile(self) -> Callable[ + [bigtable_instance_admin.CreateAppProfileRequest], + instance.AppProfile]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateAppProfile(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_cluster(self) -> Callable[ + [bigtable_instance_admin.CreateClusterRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_instance(self) -> Callable[ + [bigtable_instance_admin.CreateInstanceRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_app_profile(self) -> Callable[ + [bigtable_instance_admin.DeleteAppProfileRequest], + empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteAppProfile(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_cluster(self) -> Callable[ + [bigtable_instance_admin.DeleteClusterRequest], + empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_instance(self) -> Callable[ + [bigtable_instance_admin.DeleteInstanceRequest], + empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_app_profile(self) -> Callable[ + [bigtable_instance_admin.GetAppProfileRequest], + instance.AppProfile]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetAppProfile(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_cluster(self) -> Callable[ + [bigtable_instance_admin.GetClusterRequest], + instance.Cluster]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_iam_policy(self) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + policy_pb2.Policy]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_instance(self) -> Callable[ + [bigtable_instance_admin.GetInstanceRequest], + instance.Instance]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_app_profiles(self) -> Callable[ + [bigtable_instance_admin.ListAppProfilesRequest], + bigtable_instance_admin.ListAppProfilesResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListAppProfiles(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_clusters(self) -> Callable[ + [bigtable_instance_admin.ListClustersRequest], + bigtable_instance_admin.ListClustersResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._ListClusters(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_hot_tablets(self) -> Callable[ + [bigtable_instance_admin.ListHotTabletsRequest], + bigtable_instance_admin.ListHotTabletsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListHotTablets(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_instances(self) -> Callable[ + [bigtable_instance_admin.ListInstancesRequest], + bigtable_instance_admin.ListInstancesResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListInstances(self._session, self._host, self._interceptor) # type: ignore + + @property + def partial_update_cluster(self) -> Callable[ + [bigtable_instance_admin.PartialUpdateClusterRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._PartialUpdateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def partial_update_instance(self) -> Callable[ + [bigtable_instance_admin.PartialUpdateInstanceRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._PartialUpdateInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def set_iam_policy(self) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + policy_pb2.Policy]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + @property + def test_iam_permissions(self) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_app_profile(self) -> Callable[ + [bigtable_instance_admin.UpdateAppProfileRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateAppProfile(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_cluster(self) -> Callable[ + [instance.Cluster], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateCluster(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_instance(self) -> Callable[ + [instance.Instance], + instance.Instance]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UpdateInstance(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__=( + 'BigtableInstanceAdminRestTransport', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py new file mode 100644 index 000000000..4923ee911 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from .client import BigtableTableAdminClient +from .async_client import BigtableTableAdminAsyncClient + +__all__ = ( + 'BigtableTableAdminClient', + 'BigtableTableAdminAsyncClient', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py new file mode 100644 index 000000000..987d9ca4f --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -0,0 +1,2645 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +from collections import OrderedDict +import functools +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version + +from google.api_core.client_options import ClientOptions +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport +from .client import BigtableTableAdminClient + + +class BigtableTableAdminAsyncClient: + """Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. 
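+
+    A minimal usage sketch (illustrative only; the resource names are
+    assumptions and credentials are taken from the environment)::
+
+        from google.cloud.bigtable_admin_v2 import BigtableTableAdminAsyncClient
+
+        async def main():
+            client = BigtableTableAdminAsyncClient()
+            table = await client.get_table(
+                name="projects/my-project/instances/my-instance/tables/my-table",
+            )
+            print(table.name)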
+ """ + + _client: BigtableTableAdminClient + + DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT + DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + + backup_path = staticmethod(BigtableTableAdminClient.backup_path) + parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) + cluster_path = staticmethod(BigtableTableAdminClient.cluster_path) + parse_cluster_path = staticmethod(BigtableTableAdminClient.parse_cluster_path) + crypto_key_version_path = staticmethod(BigtableTableAdminClient.crypto_key_version_path) + parse_crypto_key_version_path = staticmethod(BigtableTableAdminClient.parse_crypto_key_version_path) + instance_path = staticmethod(BigtableTableAdminClient.instance_path) + parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path) + snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path) + parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path) + table_path = staticmethod(BigtableTableAdminClient.table_path) + parse_table_path = staticmethod(BigtableTableAdminClient.parse_table_path) + common_billing_account_path = staticmethod(BigtableTableAdminClient.common_billing_account_path) + parse_common_billing_account_path = staticmethod(BigtableTableAdminClient.parse_common_billing_account_path) + common_folder_path = staticmethod(BigtableTableAdminClient.common_folder_path) + parse_common_folder_path = staticmethod(BigtableTableAdminClient.parse_common_folder_path) + common_organization_path = staticmethod(BigtableTableAdminClient.common_organization_path) + parse_common_organization_path = staticmethod(BigtableTableAdminClient.parse_common_organization_path) + common_project_path = staticmethod(BigtableTableAdminClient.common_project_path) + parse_common_project_path = staticmethod(BigtableTableAdminClient.parse_common_project_path) + common_location_path = staticmethod(BigtableTableAdminClient.common_location_path) + parse_common_location_path = staticmethod(BigtableTableAdminClient.parse_common_location_path) + + @classmethod + def from_service_account_info(cls, info: dict, *args, **kwargs): + """Creates an instance of this client using the provided credentials + info. + + Args: + info (dict): The service account private key info. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableTableAdminAsyncClient: The constructed client. + """ + return BigtableTableAdminClient.from_service_account_info.__func__(BigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore + + @classmethod + def from_service_account_file(cls, filename: str, *args, **kwargs): + """Creates an instance of this client using the provided credentials + file. + + Args: + filename (str): The path to the service account private key json + file. + args: Additional arguments to pass to the constructor. + kwargs: Additional arguments to pass to the constructor. + + Returns: + BigtableTableAdminAsyncClient: The constructed client. + """ + return BigtableTableAdminClient.from_service_account_file.__func__(BigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore + + from_service_account_json = from_service_account_file + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. 
+
+        The client cert source is determined in the following order:
+        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
+        client cert source is None.
+        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
+        default client cert source exists, use the default one; otherwise the client cert
+        source is None.
+
+        The API endpoint is determined in the following order:
+        (1) if `client_options.api_endpoint` is provided, use the provided one.
+        (2) if `GOOGLE_API_USE_MTLS_ENDPOINT` environment variable is "always", use the
+        default mTLS endpoint; if the environment variable is "never", use the default API
+        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
+        use the default API endpoint.
+
+        More details can be found at https://google.aip.dev/auth/4114.
+
+        Args:
+            client_options (google.api_core.client_options.ClientOptions): Custom options for the
+                client. Only the `api_endpoint` and `client_cert_source` properties may be used
+                in this method.
+
+        Returns:
+            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
+                client cert source to use.
+
+        Raises:
+            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
+        """
+        return BigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options)  # type: ignore
+
+    @property
+    def transport(self) -> BigtableTableAdminTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            BigtableTableAdminTransport: The transport used by the client instance.
+        """
+        return self._client.transport
+
+    get_transport_class = functools.partial(type(BigtableTableAdminClient).get_transport_class, type(BigtableTableAdminClient))
+
+    def __init__(self, *,
+            credentials: Optional[ga_credentials.Credentials] = None,
+            transport: Union[str, BigtableTableAdminTransport] = "grpc_asyncio",
+            client_options: Optional[ClientOptions] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            ) -> None:
+        """Instantiates the bigtable table admin client.
+
+        Args:
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+            transport (Union[str, ~.BigtableTableAdminTransport]): The
+                transport to use. If set to None, a transport is chosen
+                automatically.
+            client_options (ClientOptions): Custom options for the client. It
+                won't take effect if a ``transport`` instance is provided.
+                (1) The ``api_endpoint`` property can be used to override the
+                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
+                environment variable can also be used to override the endpoint:
+                "always" (always use the default mTLS endpoint), "never" (always
+                use the default regular endpoint) and "auto" (auto switch to the
+                default mTLS endpoint if client certificate is present, this is
+                the default value). However, the ``api_endpoint`` property takes
+                precedence if provided.
+                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
+                is "true", then the ``client_cert_source`` property can be used
+                to provide client certificate for mutual TLS transport. If
+                not provided, the default SSL client certificate will be used if
+                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
+                set, no client certificate will be used.
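+
+                For example, to pin the regular endpoint explicitly (the
+                endpoint shown is this service's default, assumed here)::
+
+                    client = BigtableTableAdminAsyncClient(
+                        client_options=ClientOptions(
+                            api_endpoint="bigtableadmin.googleapis.com",
+                        ),
+                    )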
+ + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + """ + self._client = BigtableTableAdminClient( + credentials=credentials, + transport=transport, + client_options=client_options, + client_info=client_info, + + ) + + async def create_table(self, + request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, + *, + parent: Optional[str] = None, + table_id: Optional[str] = None, + table: Optional[gba_table.Table] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gba_table.Table: + r"""Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + parent (:class:`str`): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (:class:`str`): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. Maximum 50 + characters. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table (:class:`google.cloud.bigtable_admin_v2.types.Table`): + Required. The Table to create. + This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, table]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.CreateTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if table is not None: + request.table = table + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_table, + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
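+        # (These are sent as the ``x-goog-request-params`` header, e.g.
+        # ``parent=projects/my-project/instances/my-instance``, so the service
+        # can route the call without inspecting the request payload.)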
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def create_table_from_snapshot(self, + request: Optional[Union[bigtable_table_admin.CreateTableFromSnapshotRequest, dict]] = None, + *, + parent: Optional[str] = None, + table_id: Optional[str] = None, + source_snapshot: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (:class:`str`): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (:class:`str`): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_snapshot (:class:`str`): + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in + the same instance. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``source_snapshot`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. 
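+        # (``CreateTableFromSnapshotRequest(request)`` below accepts either an
+        # existing request message or a plain dict with the same field names,
+        # per the ``Union[..., dict]`` annotation above.)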
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, source_snapshot]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if source_snapshot is not None: + request.source_snapshot = source_snapshot + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_table_from_snapshot, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, + ) + + # Done; return the response. + return response + + async def list_tables(self, + request: Optional[Union[bigtable_table_admin.ListTablesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTablesAsyncPager: + r"""Lists all tables served from a specified instance. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + parent (:class:`str`): + Required. The unique name of the instance for which + tables should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
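+        # (e.g. ``list_tables(request={"parent": p}, parent=p)`` raises
+        # ValueError: pass either a request object or flattened fields, never
+        # both.)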
+ has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.ListTablesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.list_tables, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__aiter__` convenience method. + response = pagers.ListTablesAsyncPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def get_table(self, + request: Optional[Union[bigtable_table_admin.GetTableRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Gets metadata information about the specified table. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + name (:class:`str`): + Required. The unique name of the requested table. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.GetTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
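+        # (The default policy below retries only DeadlineExceeded and
+        # ServiceUnavailable, with exponential backoff: a 1s initial delay
+        # doubling up to a 60s cap, under an overall 60s deadline.)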
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.get_table, + default_retry=retries.Retry( +initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def update_table(self, + request: Optional[Union[bigtable_table_admin.UpdateTableRequest, dict]] = None, + *, + table: Optional[gba_table.Table] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Updates a specified table. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]]): + The request object. The request for + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. + table (:class:`google.cloud.bigtable_admin_v2.types.Table`): + Required. The table to update. The table's ``name`` + field is used to identify the table to update. + + This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): + Required. The list of fields to update. A mask + specifying which fields (e.g. ``change_stream_config``) + in the ``table`` field should be updated. This mask is + relative to the ``table`` field, not to the request + message. The wildcard (*) path is currently not + supported. Currently UpdateTable is only supported for + the following fields: + + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it + will return an UNIMPLEMENTED error. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([table, update_mask]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.UpdateTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table is not None: + request.table = table + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_table, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table.name", request.table.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + gba_table.Table, + metadata_type=bigtable_table_admin.UpdateTableMetadata, + ) + + # Done; return the response. + return response + + async def delete_table(self, + request: Optional[Union[bigtable_table_admin.DeleteTableRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes a specified table and all of its + data. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + name (:class:`str`): + Required. The unique name of the table to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.DeleteTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_table, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. 
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def undelete_table(self, + request: Optional[Union[bigtable_table_admin.UndeleteTableRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Restores a specified table which was accidentally + deleted. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + name (:class:`str`): + Required. The unique name of the table to be restored. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.UndeleteTableRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.undelete_table, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.UndeleteTableMetadata, + ) + + # Done; return the response. 
+ return response + + async def modify_column_families(self, + request: Optional[Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict]] = None, + *, + name: Optional[str] = None, + modifications: Optional[MutableSequence[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + name (:class:`str`): + Required. The unique name of the table whose families + should be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + modifications (:class:`MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]`): + Required. Modifications to be + atomically applied to the specified + table's families. Entries are applied in + order, meaning that earlier + modifications can be masked by later + ones (in the case of repeated updates to + the same family, for example). + + This corresponds to the ``modifications`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, modifications]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if modifications: + request.modifications.extend(modifications) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.modify_column_families, + default_timeout=300.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
+ response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def drop_row_range(self, + request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + request = bigtable_table_admin.DropRowRangeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.drop_row_range, + default_timeout=3600.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def generate_consistency_token(self, + request: Optional[Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + name (:class:`str`): + Required. The unique name of the Table for which to + create a consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse:
+ Response message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken]
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable_table_admin.GenerateConsistencyTokenRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.generate_consistency_token,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=60.0,
+ multiplier=2,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=60.0,
+ ),
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def check_consistency(self,
+ request: Optional[Union[bigtable_table_admin.CheckConsistencyRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ consistency_token: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> bigtable_table_admin.CheckConsistencyResponse:
+ r"""Checks replication consistency based on a consistency
+ token, that is, if replication has caught up based on
+ the conditions specified in the token and the check
+ request.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]]):
+ The request object. Request message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency]
+ name (:class:`str`):
+ Required. The unique name of the Table for which to
+ check replication consistency. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ consistency_token (:class:`str`):
+ Required. The token created using
+ GenerateConsistencyToken for the Table.
+
+ This corresponds to the ``consistency_token`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse:
+ Response message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency]
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, consistency_token])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable_table_admin.CheckConsistencyRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+ if consistency_token is not None:
+ request.consistency_token = consistency_token
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.check_consistency,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=60.0,
+ multiplier=2,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=60.0,
+ ),
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def snapshot_table(self,
+ request: Optional[Union[bigtable_table_admin.SnapshotTableRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ cluster: Optional[str] = None,
+ snapshot_id: Optional[str] = None,
+ description: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Creates a new snapshot in the specified cluster from
+ the specified source table. The cluster and the table
+ must be in the same instance.
+
+ Note: This is a private alpha release of Cloud Bigtable
+ snapshots. This feature is not currently available to
+ most Cloud Bigtable customers. This feature might be
+ changed in backward-incompatible ways and is not
+ recommended for production use. It is not subject to any
+ SLA or deprecation policy.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]):
+ The request object. Request message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable]
+
+ Note: This is a private alpha release of Cloud Bigtable
+ snapshots. This feature is not currently available to
+ most Cloud Bigtable customers. This feature might be
+ changed in backward-incompatible ways and is not
+ recommended for production use. It is not subject to any
+ SLA or deprecation policy.
+ name (:class:`str`):
+ Required. The unique name of the table to have the
+ snapshot taken. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{table}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ cluster (:class:`str`):
+ Required. The name of the cluster in which the snapshot
+ will be created. Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``cluster`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ snapshot_id (:class:`str`):
+ Required. The ID by which the new snapshot should be
+ referred to within the parent cluster, e.g.,
+ ``mysnapshot`` of the form:
+ ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``.
+
+ This corresponds to the ``snapshot_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ description (:class:`str`):
+ Description of the snapshot.
+ This corresponds to the ``description`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a
+ checkpoint for data restoration or a data source for
+ a new table.
+
+ Note: This is a private alpha release of Cloud
+ Bigtable snapshots. This feature is not currently
+ available to most Cloud Bigtable customers. This
+ feature might be changed in backward-incompatible
+ ways and is not recommended for production use. It is
+ not subject to any SLA or deprecation policy.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name, cluster, snapshot_id, description])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable_table_admin.SnapshotTableRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+ if cluster is not None:
+ request.cluster = cluster
+ if snapshot_id is not None:
+ request.snapshot_id = snapshot_id
+ if description is not None:
+ request.description = description
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.snapshot_table,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ table.Snapshot,
+ metadata_type=bigtable_table_admin.SnapshotTableMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_snapshot(self,
+ request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> table.Snapshot:
+ r"""Gets metadata information about the specified
+ snapshot.
+ Note: This is a private alpha release of Cloud Bigtable
+ snapshots. This feature is not currently available to
+ most Cloud Bigtable customers. This feature might be
+ changed in backward-incompatible ways and is not
+ recommended for production use. It is not subject to any
+ SLA or deprecation policy.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]):
+ The request object. Request message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot]
+
+ Note: This is a private alpha release of Cloud Bigtable
+ snapshots. This feature is not currently available to
+ most Cloud Bigtable customers. This feature might be
+ changed in backward-incompatible ways and is not
+ recommended for production use. It is not subject to any
+ SLA or deprecation policy.
+ name (:class:`str`):
+ Required. The unique name of the requested snapshot.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.Snapshot:
+ A snapshot of a table at a particular
+ time. A snapshot can be used as a
+ checkpoint for data restoration or a
+ data source for a new table.
+
+ Note: This is a private alpha release of
+ Cloud Bigtable snapshots. This feature
+ is not currently available to most Cloud
+ Bigtable customers. This feature might
+ be changed in backward-incompatible ways
+ and is not recommended for production
+ use. It is not subject to any SLA or
+ deprecation policy.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable_table_admin.GetSnapshotRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_snapshot,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=60.0,
+ multiplier=2,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=60.0,
+ ),
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def list_snapshots(self, + request: Optional[Union[bigtable_table_admin.ListSnapshotsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSnapshotsAsyncPager: + r"""Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (:class:`str`): + Required. The unique name of the cluster for which + snapshots should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.ListSnapshotsRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. 
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_snapshots,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=60.0,
+ multiplier=2,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=60.0,
+ ),
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListSnapshotsAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def delete_snapshot(self,
+ request: Optional[Union[bigtable_table_admin.DeleteSnapshotRequest, dict]] = None,
+ *,
+ name: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> None:
+ r"""Permanently deletes the specified snapshot.
+
+ Note: This is a private alpha release of Cloud Bigtable
+ snapshots. This feature is not currently available to
+ most Cloud Bigtable customers. This feature might be
+ changed in backward-incompatible ways and is not
+ recommended for production use. It is not subject to any
+ SLA or deprecation policy.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]):
+ The request object. Request message for
+ [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot]
+
+ Note: This is a private alpha release of Cloud Bigtable
+ snapshots. This feature is not currently available to
+ most Cloud Bigtable customers. This feature might be
+ changed in backward-incompatible ways and is not
+ recommended for production use. It is not subject to any
+ SLA or deprecation policy.
+ name (:class:`str`):
+ Required. The unique name of the snapshot to be deleted.
+ Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``.
+
+ This corresponds to the ``name`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([name])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable_table_admin.DeleteSnapshotRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if name is not None:
+ request.name = name
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.delete_snapshot,
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ async def create_backup(self,
+ request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ backup: Optional[table.Backup] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Starts creating a new Cloud Bigtable Backup. The returned backup
+ [long-running operation][google.longrunning.Operation] can be
+ used to track creation of the backup. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata].
+ The [response][google.longrunning.Operation.response] field type
+ is [Backup][google.bigtable.admin.v2.Backup], if successful.
+ Cancelling the returned operation will stop the creation and
+ delete the backup.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]]):
+ The request object. The request for
+ [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup].
+ parent (:class:`str`):
+ Required. This must be one of the clusters in the
+ instance in which this table is located. The backup will
+ be stored in this cluster. Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (:class:`str`):
+ Required. The id of the backup to be created. The
+ ``backup_id`` along with the ``parent`` are
+ combined as {parent}/backups/{backup_id} to create the
+ full backup name, of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+ This string must be between 1 and 50 characters in
+ length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*.
+
+ This corresponds to the ``backup_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`):
+ Required. The backup to create.
+ This corresponds to the ``backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.Backup` A
+ backup of a Cloud Bigtable table.
+
+ """
+ # Create or coerce a protobuf request object.
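+ # The proto-plus constructor below accepts either an existing
+ # CreateBackupRequest instance or a dict of its fields.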
+ # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, backup]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.CreateBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if backup is not None: + request.backup = backup + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.create_backup, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CreateBackupMetadata, + ) + + # Done; return the response. + return response + + async def get_backup(self, + request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]]): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + name (:class:`str`): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.GetBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. 
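+ # get_backup is idempotent, so a default retry policy covering
+ # transient DEADLINE_EXCEEDED and UNAVAILABLE errors is attached below.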
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.get_backup,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=60.0,
+ multiplier=2,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=60.0,
+ ),
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("name", request.name),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def update_backup(self,
+ request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None,
+ *,
+ backup: Optional[table.Backup] = None,
+ update_mask: Optional[field_mask_pb2.FieldMask] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> table.Backup:
+ r"""Updates a pending or completed Cloud Bigtable Backup.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]]):
+ The request object. The request for
+ [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup].
+ backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`):
+ Required. The backup to update. ``backup.name``, and the
+ fields to be updated as specified by ``update_mask`` are
+ required. Other fields are ignored. Update is only
+ supported for the following fields:
+
+ - ``backup.expire_time``.
+
+ This corresponds to the ``backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
+ Required. A mask specifying which fields (e.g.
+ ``expire_time``) in the Backup resource should be
+ updated. This mask is relative to the Backup resource,
+ not to the request message. The field mask must always
+ be specified; this prevents any future fields from being
+ erased accidentally by clients that do not know about
+ them.
+
+ This corresponds to the ``update_mask`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.types.Backup:
+ A backup of a Cloud Bigtable table.
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([backup, update_mask])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable_table_admin.UpdateBackupRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if backup is not None:
+ request.backup = backup
+ if update_mask is not None:
+ request.update_mask = update_mask
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
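+ # update_backup mutates server state, so no default retry is attached;
+ # the call is retried only if the caller passes an explicit retry.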
+ rpc = gapic_v1.method_async.wrap_method( + self._client._transport.update_backup, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("backup.name", request.backup.name), + )), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + async def delete_backup(self, + request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a pending or completed Cloud Bigtable backup. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]]): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + name (:class:`str`): + Required. Name of the backup to delete. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + request = bigtable_table_admin.DeleteBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.delete_backup, + default_timeout=60.0, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + async def list_backups(self, + request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBackupsAsyncPager: + r"""Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]]): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + parent (:class:`str`): + Required. The cluster to list backups from. 
Values are
+ of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+ Use ``{cluster} = '-'`` to list backups for all clusters
+ in an instance, e.g.,
+ ``projects/{project}/instances/{instance}/clusters/-``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager:
+ The response for
+ [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
+
+ Iterating over this object will yield results and
+ resolve additional pages automatically.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable_table_admin.ListBackupsRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.list_backups,
+ default_retry=retries.Retry(
+ initial=1.0,
+ maximum=60.0,
+ multiplier=2,
+ predicate=retries.if_exception_type(
+ core_exceptions.DeadlineExceeded,
+ core_exceptions.ServiceUnavailable,
+ ),
+ deadline=60.0,
+ ),
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # This method is paged; wrap the response in a pager, which provides
+ # an `__aiter__` convenience method.
+ response = pagers.ListBackupsAsyncPager(
+ method=rpc,
+ request=request,
+ response=response,
+ metadata=metadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def restore_table(self,
+ request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None,
+ *,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Create a new table by restoring from a completed backup. The
+ returned table [long-running
+ operation][google.longrunning.Operation] can be used to track
+ the progress of the operation, and to cancel it. The
+ [metadata][google.longrunning.Operation.metadata] field type is
+ [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata].
+ The [response][google.longrunning.Operation.response] type is
+ [Table][google.bigtable.admin.v2.Table], if successful.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]):
+ The request object.
The request for
+ [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp.
+ Each table is served using the resources of its
+ parent cluster.
+
+ """
+ # Create or coerce a protobuf request object.
+ request = bigtable_table_admin.RestoreTableRequest(request)
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.restore_table,
+ default_timeout=60.0,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ table.Table,
+ metadata_type=bigtable_table_admin.RestoreTableMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def copy_backup(self,
+ request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None,
+ *,
+ parent: Optional[str] = None,
+ backup_id: Optional[str] = None,
+ source_backup: Optional[str] = None,
+ expire_time: Optional[timestamp_pb2.Timestamp] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> operation_async.AsyncOperation:
+ r"""Copy a Cloud Bigtable backup to a new backup in the
+ destination cluster located in the destination instance
+ and project.
+
+ Args:
+ request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]):
+ The request object. The request for
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+ parent (:class:`str`):
+ Required. The name of the destination cluster that will
+ contain the backup copy. The cluster must already
+ exist. Values are of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+ This corresponds to the ``parent`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ backup_id (:class:`str`):
+ Required. The id of the new backup. The ``backup_id``
+ along with ``parent`` are combined as
+ {parent}/backups/{backup_id} to create the full backup
+ name, of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+ This string must be between 1 and 50 characters in
+ length and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*.
+
+ This corresponds to the ``backup_id`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ source_backup (:class:`str`):
+ Required. The source backup to be copied from. The
+ source backup needs to be in READY state for it to be
+ copied.
Copying a copied backup is not allowed. Once
+ CopyBackup is in progress, the source backup cannot be
+ deleted or cleaned up on expiration until CopyBackup is
+ finished. Values are of the form:
+ ``projects//instances//clusters//backups/``.
+
+ This corresponds to the ``source_backup`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`):
+ Required. The expiration time of the copied
+ backup with microsecond granularity that must be at
+ least 6 hours and at most 30 days from the time the
+ request is received. Once the ``expire_time`` has
+ passed, Cloud Bigtable will delete the backup and free
+ the resources used by the backup.
+
+ This corresponds to the ``expire_time`` field
+ on the ``request`` instance; if ``request`` is provided, this
+ should not be set.
+ retry (google.api_core.retry.Retry): Designation of what errors, if any,
+ should be retried.
+ timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be
+ sent along with the request as metadata.
+
+ Returns:
+ google.api_core.operation_async.AsyncOperation:
+ An object representing a long-running operation.
+
+ The result type for the operation will be
+ :class:`google.cloud.bigtable_admin_v2.types.Backup` A
+ backup of a Cloud Bigtable table.
+
+ """
+ # Create or coerce a protobuf request object.
+ # Quick check: If we got a request object, we should *not* have
+ # gotten any keyword arguments that map to the request.
+ has_flattened_params = any([parent, backup_id, source_backup, expire_time])
+ if request is not None and has_flattened_params:
+ raise ValueError("If the `request` argument is set, then none of "
+ "the individual field arguments should be set.")
+
+ request = bigtable_table_admin.CopyBackupRequest(request)
+
+ # If we have keyword arguments corresponding to fields on the
+ # request, apply these.
+ if parent is not None:
+ request.parent = parent
+ if backup_id is not None:
+ request.backup_id = backup_id
+ if source_backup is not None:
+ request.source_backup = source_backup
+ if expire_time is not None:
+ request.expire_time = expire_time
+
+ # Wrap the RPC method; this adds retry and timeout information,
+ # and friendly error handling.
+ rpc = gapic_v1.method_async.wrap_method(
+ self._client._transport.copy_backup,
+ default_timeout=None,
+ client_info=DEFAULT_CLIENT_INFO,
+ )
+
+ # Certain fields should be provided within the metadata header;
+ # add these here.
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ("parent", request.parent),
+ )),
+ )
+
+ # Send the request.
+ response = await rpc(
+ request,
+ retry=retry,
+ timeout=timeout,
+ metadata=metadata,
+ )
+
+ # Wrap the response in an operation future.
+ response = operation_async.from_gapic(
+ response,
+ self._client._transport.operations_client,
+ table.Backup,
+ metadata_type=bigtable_table_admin.CopyBackupMetadata,
+ )
+
+ # Done; return the response.
+ return response
+
+ async def get_iam_policy(self,
+ request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
+ *,
+ resource: Optional[str] = None,
+ retry: OptionalRetry = gapic_v1.method.DEFAULT,
+ timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+ metadata: Sequence[Tuple[str, str]] = (),
+ ) -> policy_pb2.Policy:
+ r"""Gets the access control policy for a Table or Backup
+ resource. Returns an empty policy if the resource exists
+ but does not have a policy set.
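+
+ A minimal usage sketch follows; the resource name is
+ illustrative:
+
+ .. code-block:: python
+
+     from google.cloud import bigtable_admin_v2
+
+     async def sample_get_iam_policy():
+         client = bigtable_admin_v2.BigtableTableAdminAsyncClient()
+         policy = await client.get_iam_policy(
+             resource="projects/my-project/instances/my-instance/tables/my-table",
+         )
+         for binding in policy.bindings:
+             print(binding.role, list(binding.members))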
+ + Args: + request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): + The request object. Request message for ``GetIamPolicy`` method. + resource (:class:`str`): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError("If the `request` argument is set, then none of " + "the individual field arguments should be set.") + + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. 
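+ # google.iam.v1 messages are plain protobuf classes rather than
+ # proto-plus types, so a dict is expanded with ** instead of being
+ # passed positionally.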
+        if isinstance(request, dict):
+            request = iam_policy_pb2.GetIamPolicyRequest(**request)
+        elif not request:
+            request = iam_policy_pb2.GetIamPolicyRequest(resource=resource)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.get_iam_policy,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=60.0,
+                multiplier=2,
+                predicate=retries.if_exception_type(
+                    core_exceptions.DeadlineExceeded,
+                    core_exceptions.ServiceUnavailable,
+                ),
+                deadline=60.0,
+            ),
+            default_timeout=60.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("resource", request.resource),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def set_iam_policy(self,
+            request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
+            *,
+            resource: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> policy_pb2.Policy:
+        r"""Sets the access control policy on a Table or Backup
+        resource. Replaces any existing policy.
+
+        Args:
+            request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
+                The request object. Request message for ``SetIamPolicy`` method.
+            resource (:class:`str`):
+                REQUIRED: The resource for which the
+                policy is being specified. See the
+                operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.iam.v1.policy_pb2.Policy:
+                An Identity and Access Management (IAM) policy, which specifies access
+                controls for Google Cloud resources.
+
+                A Policy is a collection of bindings. A binding binds
+                one or more members, or principals, to a single role.
+                Principals can be user accounts, service accounts,
+                Google groups, and domains (such as G Suite). A role
+                is a named list of permissions; each role can be an
+                IAM predefined role or a user-created custom role.
+
+                For some types of Google Cloud resources, a binding
+                can also specify a condition, which is a logical
+                expression that allows access to a resource only if
+                the expression evaluates to true. A condition can add
+                constraints based on attributes of the request, the
+                resource, or both. To learn which resources support
+                conditions in their IAM policies, see the [IAM
+                documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
+
+                **JSON example:**
+
+                ::
+
+                    {
+                      "bindings": [
+                        {
+                          "role": "roles/resourcemanager.organizationAdmin",
+                          "members": [
+                            "user:mike@example.com",
+                            "group:admins@example.com",
+                            "domain:google.com",
+                            "serviceAccount:my-project-id@appspot.gserviceaccount.com"
+                          ]
+                        },
+                        {
+                          "role": "roles/resourcemanager.organizationViewer",
+                          "members": [
+                            "user:eve@example.com"
+                          ],
+                          "condition": {
+                            "title": "expirable access",
+                            "description": "Does not grant access after Sep 2020",
+                            "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')"
+                          }
+                        }
+                      ],
+                      "etag": "BwWWja0YfJA=",
+                      "version": 3
+                    }
+
+                **YAML example:**
+
+                ::
+
+                    bindings:
+                    - members:
+                      - user:mike@example.com
+                      - group:admins@example.com
+                      - domain:google.com
+                      - serviceAccount:my-project-id@appspot.gserviceaccount.com
+                      role: roles/resourcemanager.organizationAdmin
+                    - members:
+                      - user:eve@example.com
+                      role: roles/resourcemanager.organizationViewer
+                      condition:
+                        title: expirable access
+                        description: Does not grant access after Sep 2020
+                        expression: request.time < timestamp('2020-10-01T00:00:00.000Z')
+                    etag: BwWWja0YfJA=
+                    version: 3
+
+                For a description of IAM and its features, see the
+                [IAM
+                documentation](https://cloud.google.com/iam/docs/).
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+        elif not request:
+            request = iam_policy_pb2.SetIamPolicyRequest(resource=resource)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.set_iam_policy,
+            default_timeout=60.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("resource", request.resource),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def test_iam_permissions(self,
+            request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
+            *,
+            resource: Optional[str] = None,
+            permissions: Optional[MutableSequence[str]] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> iam_policy_pb2.TestIamPermissionsResponse:
+        r"""Returns permissions that the caller has on the
+        specified Table or Backup resource.
+
+        Args:
+            request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
+                The request object. Request message for ``TestIamPermissions`` method.
+            resource (:class:`str`):
+                REQUIRED: The resource for which the
+                policy detail is being requested. See
+                the operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            permissions (:class:`MutableSequence[str]`):
+                The set of permissions to check for the ``resource``.
+                Permissions with wildcards (such as '*' or 'storage.*')
+                are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+
+                This corresponds to the ``permissions`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
+                Response message for TestIamPermissions method.
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource, permissions])
+        if request is not None and has_flattened_params:
+            raise ValueError("If the `request` argument is set, then none of "
+                             "the individual field arguments should be set.")
+
+        # The request isn't a proto-plus wrapped type,
+        # so it must be constructed via keyword expansion.
+        if isinstance(request, dict):
+            request = iam_policy_pb2.TestIamPermissionsRequest(**request)
+        elif not request:
+            request = iam_policy_pb2.TestIamPermissionsRequest(resource=resource, permissions=permissions)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = gapic_v1.method_async.wrap_method(
+            self._client._transport.test_iam_permissions,
+            default_retry=retries.Retry(
+                initial=1.0,
+                maximum=60.0,
+                multiplier=2,
+                predicate=retries.if_exception_type(
+                    core_exceptions.DeadlineExceeded,
+                    core_exceptions.ServiceUnavailable,
+                ),
+                deadline=60.0,
+            ),
+            default_timeout=60.0,
+            client_info=DEFAULT_CLIENT_INFO,
+        )
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("resource", request.resource),
+            )),
+        )
+
+        # Send the request.
+        response = await rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    async def __aenter__(self) -> "BigtableTableAdminAsyncClient":
+        return self
+
+    async def __aexit__(self, exc_type, exc, tb):
+        await self.transport.close()
+
+DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__)
+
+
+__all__ = (
+    "BigtableTableAdminAsyncClient",
+)
diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
new file mode 100644
index 000000000..6460a00ce
--- /dev/null
+++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py
@@ -0,0 +1,2825 @@
+# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +import os +import re +from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version + +from google.api_core import client_options as client_options_lib +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport import mtls # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth.exceptions import MutualTLSChannelError # type: ignore +from google.oauth2 import service_account # type: ignore + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + +from google.api_core import operation # type: ignore +from google.api_core import operation_async # type: ignore +from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +from .transports.grpc import BigtableTableAdminGrpcTransport +from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport +from .transports.rest import BigtableTableAdminRestTransport + + +class BigtableTableAdminClientMeta(type): + """Metaclass for the BigtableTableAdmin client. + + This provides class-level methods for building and retrieving + support objects (e.g. transport) without polluting the client instance + objects. + """ + _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTableAdminTransport]] + _transport_registry["grpc"] = BigtableTableAdminGrpcTransport + _transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport + _transport_registry["rest"] = BigtableTableAdminRestTransport + + def get_transport_class(cls, + label: Optional[str] = None, + ) -> Type[BigtableTableAdminTransport]: + """Returns an appropriate transport class. + + Args: + label: The name of the desired transport. If none is + provided, then the first transport in the registry is used. + + Returns: + The transport class to use. + """ + # If a specific transport is requested, return that one. + if label: + return cls._transport_registry[label] + + # No transport is requested; return the default (that is, the first one + # in the dictionary). + return next(iter(cls._transport_registry.values())) + + +class BigtableTableAdminClient(metaclass=BigtableTableAdminClientMeta): + """Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + """ + + @staticmethod + def _get_default_mtls_endpoint(api_endpoint): + """Converts api endpoint to mTLS endpoint. 
+
+        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
+        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
+        Args:
+            api_endpoint (Optional[str]): the api endpoint to convert.
+        Returns:
+            str: converted mTLS api endpoint.
+        """
+        if not api_endpoint:
+            return api_endpoint
+
+        mtls_endpoint_re = re.compile(
+            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
+        )
+
+        m = mtls_endpoint_re.match(api_endpoint)
+        name, mtls, sandbox, googledomain = m.groups()
+        if mtls or not googledomain:
+            return api_endpoint
+
+        if sandbox:
+            return api_endpoint.replace(
+                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
+            )
+
+        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
+
+    DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com"
+    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
+        DEFAULT_ENDPOINT
+    )
+
+    @classmethod
+    def from_service_account_info(cls, info: dict, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        info.
+
+        Args:
+            info (dict): The service account private key info.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            BigtableTableAdminClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_info(info)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    @classmethod
+    def from_service_account_file(cls, filename: str, *args, **kwargs):
+        """Creates an instance of this client using the provided credentials
+        file.
+
+        Args:
+            filename (str): The path to the service account private key json
+                file.
+            args: Additional arguments to pass to the constructor.
+            kwargs: Additional arguments to pass to the constructor.
+
+        Returns:
+            BigtableTableAdminClient: The constructed client.
+        """
+        credentials = service_account.Credentials.from_service_account_file(
+            filename)
+        kwargs["credentials"] = credentials
+        return cls(*args, **kwargs)
+
+    from_service_account_json = from_service_account_file
+
+    @property
+    def transport(self) -> BigtableTableAdminTransport:
+        """Returns the transport used by the client instance.
+
+        Returns:
+            BigtableTableAdminTransport: The transport used by the client
+            instance.
+ """ + return self._transport + + @staticmethod + def backup_path(project: str,instance: str,cluster: str,backup: str,) -> str: + """Returns a fully-qualified backup string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format(project=project, instance=instance, cluster=cluster, backup=backup, ) + + @staticmethod + def parse_backup_path(path: str) -> Dict[str,str]: + """Parses a backup path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/backups/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def cluster_path(project: str,instance: str,cluster: str,) -> str: + """Returns a fully-qualified cluster string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}".format(project=project, instance=instance, cluster=cluster, ) + + @staticmethod + def parse_cluster_path(path: str) -> Dict[str,str]: + """Parses a cluster path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def crypto_key_version_path(project: str,location: str,key_ring: str,crypto_key: str,crypto_key_version: str,) -> str: + """Returns a fully-qualified crypto_key_version string.""" + return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, crypto_key_version=crypto_key_version, ) + + @staticmethod + def parse_crypto_key_version_path(path: str) -> Dict[str,str]: + """Parses a crypto_key_version path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)/cryptoKeyVersions/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def instance_path(project: str,instance: str,) -> str: + """Returns a fully-qualified instance string.""" + return "projects/{project}/instances/{instance}".format(project=project, instance=instance, ) + + @staticmethod + def parse_instance_path(path: str) -> Dict[str,str]: + """Parses a instance path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def snapshot_path(project: str,instance: str,cluster: str,snapshot: str,) -> str: + """Returns a fully-qualified snapshot string.""" + return "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format(project=project, instance=instance, cluster=cluster, snapshot=snapshot, ) + + @staticmethod + def parse_snapshot_path(path: str) -> Dict[str,str]: + """Parses a snapshot path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/snapshots/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def table_path(project: str,instance: str,table: str,) -> str: + """Returns a fully-qualified table string.""" + return "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) + + @staticmethod + def parse_table_path(path: str) -> Dict[str,str]: + """Parses a table path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_billing_account_path(billing_account: str, ) -> str: + """Returns a fully-qualified billing_account string.""" + return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + + @staticmethod + def parse_common_billing_account_path(path: str) -> Dict[str,str]: + """Parse a billing_account path into its component segments.""" + m = re.match(r"^billingAccounts/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_folder_path(folder: str, ) -> str: + """Returns a fully-qualified folder string.""" + return "folders/{folder}".format(folder=folder, ) + + @staticmethod + def parse_common_folder_path(path: str) -> Dict[str,str]: + """Parse a folder path into its component segments.""" + m = re.match(r"^folders/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_organization_path(organization: str, ) -> str: + """Returns a fully-qualified organization string.""" + return "organizations/{organization}".format(organization=organization, ) + + @staticmethod + def parse_common_organization_path(path: str) -> Dict[str,str]: + """Parse a organization path into its component segments.""" + m = re.match(r"^organizations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_project_path(project: str, ) -> str: + """Returns a fully-qualified project string.""" + return "projects/{project}".format(project=project, ) + + @staticmethod + def parse_common_project_path(path: str) -> Dict[str,str]: + """Parse a project path into its component segments.""" + m = re.match(r"^projects/(?P.+?)$", path) + return m.groupdict() if m else {} + + @staticmethod + def common_location_path(project: str, location: str, ) -> str: + """Returns a fully-qualified location string.""" + return "projects/{project}/locations/{location}".format(project=project, location=location, ) + + @staticmethod + def parse_common_location_path(path: str) -> Dict[str,str]: + """Parse a location path into its component segments.""" + m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) + return m.groupdict() if m else {} + + @classmethod + def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): + """Return the API endpoint and client cert source for mutual TLS. + + The client cert source is determined in the following order: + (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the + client cert source is None. + (2) if `client_options.client_cert_source` is provided, use the provided one; if the + default client cert source exists, use the default one; otherwise the client cert + source is None. + + The API endpoint is determined in the following order: + (1) if `client_options.api_endpoint` if provided, use the provided one. + (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the + default mTLS endpoint; if the environment variable is "never", use the default API + endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise + use the default API endpoint. + + More details can be found at https://google.aip.dev/auth/4114. + + Args: + client_options (google.api_core.client_options.ClientOptions): Custom options for the + client. Only the `api_endpoint` and `client_cert_source` properties may be used + in this method. 
+ + Returns: + Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the + client cert source to use. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If any errors happen. + """ + if client_options is None: + client_options = client_options_lib.ClientOptions() + use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") + if use_client_cert not in ("true", "false"): + raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") + + # Figure out the client cert source to use. + client_cert_source = None + if use_client_cert == "true": + if client_options.client_cert_source: + client_cert_source = client_options.client_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + + # Figure out which api endpoint to use. + if client_options.api_endpoint is not None: + api_endpoint = client_options.api_endpoint + elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): + api_endpoint = cls.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = cls.DEFAULT_ENDPOINT + + return api_endpoint, client_cert_source + + def __init__(self, *, + credentials: Optional[ga_credentials.Credentials] = None, + transport: Optional[Union[str, BigtableTableAdminTransport]] = None, + client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + ) -> None: + """Instantiates the bigtable table admin client. + + Args: + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + transport (Union[str, BigtableTableAdminTransport]): The + transport to use. If set to None, a transport is chosen + automatically. + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the + client. It won't take effect if a ``transport`` instance is provided. + (1) The ``api_endpoint`` property can be used to override the + default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT + environment variable can also be used to override the endpoint: + "always" (always use the default mTLS endpoint), "never" (always + use the default regular endpoint) and "auto" (auto switch to the + default mTLS endpoint if client certificate is present, this is + the default value). However, the ``api_endpoint`` property takes + precedence if provided. + (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + is "true", then the ``client_cert_source`` property can be used + to provide client certificate for mutual TLS transport. If + not provided, the default SSL client certificate will be used if + present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not + set, no client certificate will be used. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. 
+ Generally, you only need to set this if you're developing + your own client library. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + """ + if isinstance(client_options, dict): + client_options = client_options_lib.from_dict(client_options) + if client_options is None: + client_options = client_options_lib.ClientOptions() + client_options = cast(client_options_lib.ClientOptions, client_options) + + api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source(client_options) + + api_key_value = getattr(client_options, "api_key", None) + if api_key_value and credentials: + raise ValueError("client_options.api_key and credentials are mutually exclusive") + + # Save or instantiate the transport. + # Ordinarily, we provide the transport, but allowing a custom transport + # instance provides an extensibility point for unusual situations. + if isinstance(transport, BigtableTableAdminTransport): + # transport is a BigtableTableAdminTransport instance. + if credentials or client_options.credentials_file or api_key_value: + raise ValueError("When providing a transport instance, " + "provide its credentials directly.") + if client_options.scopes: + raise ValueError( + "When providing a transport instance, provide its scopes " + "directly." + ) + self._transport = transport + else: + import google.auth._default # type: ignore + + if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): + credentials = google.auth._default.get_api_key_credentials(api_key_value) + + Transport = type(self).get_transport_class(transport) + self._transport = Transport( + credentials=credentials, + credentials_file=client_options.credentials_file, + host=api_endpoint, + scopes=client_options.scopes, + client_cert_source_for_mtls=client_cert_source_func, + quota_project_id=client_options.quota_project_id, + client_info=client_info, + always_use_jwt_access=True, + api_audience=client_options.api_audience, + ) + + def create_table(self, + request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, + *, + parent: Optional[str] = None, + table_id: Optional[str] = None, + table: Optional[gba_table.Table] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> gba_table.Table: + r"""Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + parent (str): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (str): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. Maximum 50 + characters. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The Table to create. 
+ This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, table]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CreateTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CreateTableRequest): + request = bigtable_table_admin.CreateTableRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if table is not None: + request.table = table + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def create_table_from_snapshot(self, + request: Optional[Union[bigtable_table_admin.CreateTableFromSnapshotRequest, dict]] = None, + *, + parent: Optional[str] = None, + table_id: Optional[str] = None, + source_snapshot: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. 
It is not subject to any + SLA or deprecation policy. + parent (str): + Required. The unique name of the instance in which to + create the table. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + table_id (str): + Required. The name by which the new table should be + referred to within the parent instance, e.g., ``foobar`` + rather than ``{parent}/tables/foobar``. + + This corresponds to the ``table_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_snapshot (str): + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in + the same instance. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``source_snapshot`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, table_id, source_snapshot]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CreateTableFromSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): + request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if table_id is not None: + request.table_id = table_id + if source_snapshot is not None: + request.source_snapshot = source_snapshot + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.create_table_from_snapshot] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, + ) + + # Done; return the response. 
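+        # (``response`` is now an operation future; calling its ``result()``
+        # blocks until the table is restored from the snapshot and yields a
+        # ``table.Table``.)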
+ return response + + def list_tables(self, + request: Optional[Union[bigtable_table_admin.ListTablesRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListTablesPager: + r"""Lists all tables served from a specified instance. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + parent (str): + Required. The unique name of the instance for which + tables should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ListTablesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ListTablesRequest): + request = bigtable_table_admin.ListTablesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_tables] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListTablesPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def get_table(self, + request: Optional[Union[bigtable_table_admin.GetTableRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Gets metadata information about the specified table. 
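+
+        A minimal usage sketch; the table name below is a hypothetical
+        placeholder, not taken from this codebase:
+
+        .. code-block:: python
+
+            from google.cloud import bigtable_admin_v2
+
+            def sample_get_table():
+                # Create a client.
+                client = bigtable_admin_v2.BigtableTableAdminClient()
+
+                # Initialize the request; substitute a real table name for
+                # this placeholder.
+                request = bigtable_admin_v2.GetTableRequest(
+                    name="projects/my-project/instances/my-instance/tables/my-table",
+                )
+
+                # Make the request.
+                response = client.get_table(request=request)
+
+                # Handle the response.
+                print(response)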
+ + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + name (str): + Required. The unique name of the requested table. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GetTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GetTableRequest): + request = bigtable_table_admin.GetTableRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_table(self, + request: Optional[Union[bigtable_table_admin.UpdateTableRequest, dict]] = None, + *, + table: Optional[gba_table.Table] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Updates a specified table. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]): + The request object. The request for + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The table to update. The table's ``name`` + field is used to identify the table to update. + + This corresponds to the ``table`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. A mask + specifying which fields (e.g. ``change_stream_config``) + in the ``table`` field should be updated. 
This mask is + relative to the ``table`` field, not to the request + message. The wildcard (*) path is currently not + supported. Currently UpdateTable is only supported for + the following fields: + + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it + will return an UNIMPLEMENTED error. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([table, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.UpdateTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.UpdateTableRequest): + request = bigtable_table_admin.UpdateTableRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if table is not None: + request.table = table + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("table.name", request.table.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + gba_table.Table, + metadata_type=bigtable_table_admin.UpdateTableMetadata, + ) + + # Done; return the response. + return response + + def delete_table(self, + request: Optional[Union[bigtable_table_admin.DeleteTableRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes a specified table and all of its + data. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + name (str): + Required. The unique name of the table to be deleted. 
+ Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DeleteTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DeleteTableRequest): + request = bigtable_table_admin.DeleteTableRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def undelete_table(self, + request: Optional[Union[bigtable_table_admin.UndeleteTableRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Restores a specified table which was accidentally + deleted. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + name (str): + Required. The unique name of the table to be restored. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. + Each table is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
+ has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.UndeleteTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.UndeleteTableRequest): + request = bigtable_table_admin.UndeleteTableRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.undelete_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Table, + metadata_type=bigtable_table_admin.UndeleteTableMetadata, + ) + + # Done; return the response. + return response + + def modify_column_families(self, + request: Optional[Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict]] = None, + *, + name: Optional[str] = None, + modifications: Optional[MutableSequence[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification]] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Table: + r"""Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + name (str): + Required. The unique name of the table whose families + should be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): + Required. Modifications to be + atomically applied to the specified + table's families. Entries are applied in + order, meaning that earlier + modifications can be masked by later + ones (in the case of repeated updates to + the same family, for example). + + This corresponds to the ``modifications`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + google.cloud.bigtable_admin_v2.types.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, modifications]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ModifyColumnFamiliesRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): + request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if modifications is not None: + request.modifications = modifications + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.modify_column_families] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def drop_row_range(self, + request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DropRowRangeRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): + request = bigtable_table_admin.DropRowRangeRequest(request) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.drop_row_range] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. 
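+        # (DropRowRange returns ``google.protobuf.Empty``, so the result is
+        # discarded and this method returns None.)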
+ rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def generate_consistency_token(self, + request: Optional[Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + name (str): + Required. The unique name of the Table for which to + create a consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GenerateConsistencyTokenRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GenerateConsistencyTokenRequest): + request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.generate_consistency_token] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
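+        # Editor's note -- an illustrative sketch of the intended flow (the
+        # table name is a placeholder): generate a token here, then hand it to
+        # CheckConsistency, as shown in the next method below.
+        #
+        #   from google.cloud import bigtable_admin_v2
+        #
+        #   client = bigtable_admin_v2.BigtableTableAdminClient()
+        #   table_name = "projects/my-project/instances/my-instance/tables/my-table"
+        #   token = client.generate_consistency_token(
+        #       name=table_name).consistency_token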
+ return response + + def check_consistency(self, + request: Optional[Union[bigtable_table_admin.CheckConsistencyRequest, dict]] = None, + *, + name: Optional[str] = None, + consistency_token: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> bigtable_table_admin.CheckConsistencyResponse: + r"""Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + name (str): + Required. The unique name of the Table for which to + check replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + consistency_token (str): + Required. The token created using + GenerateConsistencyToken for the Table. + + This corresponds to the ``consistency_token`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, consistency_token]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CheckConsistencyRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): + request = bigtable_table_admin.CheckConsistencyRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if consistency_token is not None: + request.consistency_token = consistency_token + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.check_consistency] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
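+        # Editor's note -- a hypothetical polling loop pairing this method
+        # with GenerateConsistencyToken; the 5-second interval is an arbitrary
+        # choice, and ``consistent`` is the boolean field on the response:
+        #
+        #   import time
+        #
+        #   while not client.check_consistency(
+        #           name=table_name, consistency_token=token).consistent:
+        #       time.sleep(5)  # replication has not caught up yet; retry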
+        return response
+
+    def snapshot_table(self,
+            request: Optional[Union[bigtable_table_admin.SnapshotTableRequest, dict]] = None,
+            *,
+            name: Optional[str] = None,
+            cluster: Optional[str] = None,
+            snapshot_id: Optional[str] = None,
+            description: Optional[str] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation.Operation:
+        r"""Creates a new snapshot in the specified cluster from
+        the specified source table. The cluster and the table
+        must be in the same instance.
+
+        Note: This is a private alpha release of Cloud Bigtable
+        snapshots. This feature is not currently available to
+        most Cloud Bigtable customers. This feature might be
+        changed in backward-incompatible ways and is not
+        recommended for production use. It is not subject to any
+        SLA or deprecation policy.
+
+        Args:
+            request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]):
+                The request object. Request message for
+                [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable]
+
+                Note: This is a private alpha release of Cloud Bigtable
+                snapshots. This feature is not currently available to
+                most Cloud Bigtable customers. This feature might be
+                changed in backward-incompatible ways and is not
+                recommended for production use. It is not subject to any
+                SLA or deprecation policy.
+            name (str):
+                Required. The unique name of the table to have the
+                snapshot taken. Values are of the form
+                ``projects/{project}/instances/{instance}/tables/{table}``.
+
+                This corresponds to the ``name`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            cluster (str):
+                Required. The name of the cluster in which the
+                snapshot will be created. Values are of the form
+                ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+                This corresponds to the ``cluster`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            snapshot_id (str):
+                Required. The ID by which the new snapshot should be
+                referred to within the parent cluster, e.g.,
+                ``mysnapshot`` of the form:
+                ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than
+                ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``.
+
+                This corresponds to the ``snapshot_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            description (str):
+                Description of the snapshot.
+                This corresponds to the ``description`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a
+                checkpoint for data restoration or a data source for
+                a new table.
+
+                Note: This is a private alpha release of Cloud
+                Bigtable snapshots. This feature is not currently
+                available to most Cloud Bigtable customers. This
+                feature might be changed in backward-incompatible
+                ways and is not recommended for production use.
It is + not subject to any SLA or deprecation policy. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name, cluster, snapshot_id, description]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.SnapshotTableRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): + request = bigtable_table_admin.SnapshotTableRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + if cluster is not None: + request.cluster = cluster + if snapshot_id is not None: + request.snapshot_id = snapshot_id + if description is not None: + request.description = description + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.snapshot_table] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Snapshot, + metadata_type=bigtable_table_admin.SnapshotTableMetadata, + ) + + # Done; return the response. + return response + + def get_snapshot(self, + request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Snapshot: + r"""Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the requested snapshot. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. 
+ retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Snapshot: + A snapshot of a table at a particular + time. A snapshot can be used as a + checkpoint for data restoration or a + data source for a new table. + + Note: This is a private alpha release of + Cloud Bigtable snapshots. This feature + is not currently available to most Cloud + Bigtable customers. This feature might + be changed in backward-incompatible ways + and is not recommended for production + use. It is not subject to any SLA or + deprecation policy. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GetSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): + request = bigtable_table_admin.GetSnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_snapshot] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def list_snapshots(self, + request: Optional[Union[bigtable_table_admin.ListSnapshotsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListSnapshotsPager: + r"""Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + parent (str): + Required. 
The unique name of the cluster for which + snapshots should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list snapshots for all + clusters in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud + Bigtable snapshots. This feature is not currently + available to most Cloud Bigtable customers. This + feature might be changed in backward-incompatible + ways and is not recommended for production use. It is + not subject to any SLA or deprecation policy. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.ListSnapshotsRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): + request = bigtable_table_admin.ListSnapshotsRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.list_snapshots] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # This method is paged; wrap the response in a pager, which provides + # an `__iter__` convenience method. + response = pagers.ListSnapshotsPager( + method=rpc, + request=request, + response=response, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_snapshot(self, + request: Optional[Union[bigtable_table_admin.DeleteSnapshotRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Permanently deletes the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. 
This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + name (str): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DeleteSnapshotRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): + request = bigtable_table_admin.DeleteSnapshotRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_snapshot] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def create_backup(self, + request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, + *, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + backup: Optional[table.Backup] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. 
+        Cancelling the returned operation will stop the creation and
+        delete the backup.
+
+        Args:
+            request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]):
+                The request object. The request for
+                [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup].
+            parent (str):
+                Required. This must be one of the clusters in the
+                instance in which this table is located. The backup will
+                be stored in this cluster. Values are of the form
+                ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            backup_id (str):
+                Required. The ID of the backup to be created. The
+                ``backup_id`` and the ``parent`` are combined as
+                ``{parent}/backups/{backup_id}`` to create the full
+                backup name, of the form:
+                ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+                This string must be between 1 and 50 characters in
+                length and match the regex ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+                This corresponds to the ``backup_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            backup (google.cloud.bigtable_admin_v2.types.Backup):
+                Required. The backup to create.
+                This corresponds to the ``backup`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be
+                :class:`google.cloud.bigtable_admin_v2.types.Backup` A
+                backup of a Cloud Bigtable table.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([parent, backup_id, backup])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a bigtable_table_admin.CreateBackupRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, bigtable_table_admin.CreateBackupRequest):
+            request = bigtable_table_admin.CreateBackupRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if parent is not None:
+                request.parent = parent
+            if backup_id is not None:
+                request.backup_id = backup_id
+            if backup is not None:
+                request.backup = backup
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.create_backup]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
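+        # Editor's note -- an illustrative sketch of driving this long-running
+        # operation; names are placeholders, and the ``source_table`` and
+        # ``expire_time`` fields on Backup are assumed from the v2 API (the
+        # service requires an expiry to be set):
+        #
+        #   import datetime
+        #   from google.cloud import bigtable_admin_v2
+        #
+        #   client = bigtable_admin_v2.BigtableTableAdminClient()
+        #   op = client.create_backup(
+        #       parent="projects/p/instances/i/clusters/c",
+        #       backup_id="my-backup",
+        #       backup=bigtable_admin_v2.Backup(
+        #           source_table="projects/p/instances/i/tables/t",
+        #           expire_time=datetime.datetime.now(datetime.timezone.utc)
+        #           + datetime.timedelta(days=7),
+        #       ),
+        #   )
+        #   backup = op.result()  # blocks until the backup is finished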
+ response = operation.from_gapic( + response, + self._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CreateBackupMetadata, + ) + + # Done; return the response. + return response + + def get_backup(self, + request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + name (str): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.GetBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.GetBackupRequest): + request = bigtable_table_admin.GetBackupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def update_backup(self, + request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, + *, + backup: Optional[table.Backup] = None, + update_mask: Optional[field_mask_pb2.FieldMask] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> table.Backup: + r"""Updates a pending or completed Cloud Bigtable Backup. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]): + The request object. The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. 
The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only + supported for the following fields: + + - ``backup.expire_time``. + + This corresponds to the ``backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be + updated. This mask is relative to the Backup resource, + not to the request message. The field mask must always + be specified; this prevents any future fields from being + erased accidentally by clients that do not know about + them. + + This corresponds to the ``update_mask`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.types.Backup: + A backup of a Cloud Bigtable table. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([backup, update_mask]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.UpdateBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): + request = bigtable_table_admin.UpdateBackupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if backup is not None: + request.backup = backup + if update_mask is not None: + request.update_mask = update_mask + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.update_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("backup.name", request.backup.name), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def delete_backup(self, + request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, + *, + name: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> None: + r"""Deletes a pending or completed Cloud Bigtable backup. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + name (str): + Required. Name of the backup to delete. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. 
+ + This corresponds to the ``name`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([name]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.DeleteBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): + request = bigtable_table_admin.DeleteBackupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if name is not None: + request.name = name + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.delete_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("name", request.name), + )), + ) + + # Send the request. + rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + def list_backups(self, + request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, + *, + parent: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> pagers.ListBackupsPager: + r"""Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + parent (str): + Required. The cluster to list backups from. Values are + of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list backups for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Iterating over this object will yield results and + resolve additional pages automatically. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
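+        # Editor's note -- a minimal pager sketch (placeholder names); the
+        # '-' wildcard for the cluster is documented in the ``parent`` field
+        # description above:
+        #
+        #   from google.cloud import bigtable_admin_v2
+        #
+        #   client = bigtable_admin_v2.BigtableTableAdminClient()
+        #   for backup in client.list_backups(
+        #           parent="projects/p/instances/i/clusters/-"):
+        #       print(backup.name)  # additional pages are fetched transparently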
+        has_flattened_params = any([parent])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        # Minor optimization to avoid making a copy if the user passes
+        # in a bigtable_table_admin.ListBackupsRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, bigtable_table_admin.ListBackupsRequest):
+            request = bigtable_table_admin.ListBackupsRequest(request)
+            # If we have keyword arguments corresponding to fields on the
+            # request, apply these.
+            if parent is not None:
+                request.parent = parent
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.list_backups]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # This method is paged; wrap the response in a pager, which provides
+        # an `__iter__` convenience method.
+        response = pagers.ListBackupsPager(
+            method=rpc,
+            request=request,
+            response=response,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def restore_table(self,
+            request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None,
+            *,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation.Operation:
+        r"""Creates a new table by restoring from a completed backup. The
+        returned table [long-running
+        operation][google.longrunning.Operation] can be used to track
+        the progress of the operation, and to cancel it. The
+        [metadata][google.longrunning.Operation.metadata] field type is
+        [RestoreTableMetadata][google.bigtable.admin.v2.RestoreTableMetadata].
+        The [response][google.longrunning.Operation.response] type is
+        [Table][google.bigtable.admin.v2.Table], if successful.
+
+        Args:
+            request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]):
+                The request object. The request for
+                [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.api_core.operation.Operation:
+                An object representing a long-running operation.
+
+                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp.
+                Each table is served using the resources of its
+                parent cluster.
+
+        """
+        # Create or coerce a protobuf request object.
+        # Minor optimization to avoid making a copy if the user passes
+        # in a bigtable_table_admin.RestoreTableRequest.
+        # There's no risk of modifying the input as we've already verified
+        # there are no flattened fields.
+        if not isinstance(request, bigtable_table_admin.RestoreTableRequest):
+            request = bigtable_table_admin.RestoreTableRequest(request)
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
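+        # Editor's note -- an illustrative restore sketch. RestoreTable
+        # exposes no flattened fields; the ``parent``/``table_id``/``backup``
+        # fields are assumed from the v2 RestoreTableRequest, where ``parent``
+        # names the destination instance while ``backup`` names a
+        # cluster-scoped backup:
+        #
+        #   from google.cloud import bigtable_admin_v2
+        #
+        #   client = bigtable_admin_v2.BigtableTableAdminClient()
+        #   op = client.restore_table(request=bigtable_admin_v2.RestoreTableRequest(
+        #       parent="projects/p/instances/i",
+        #       table_id="restored-table",
+        #       backup="projects/p/instances/i/clusters/c/backups/b",
+        #   ))
+        #   restored = op.result()  # the new Table, once the restore completes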
+        rpc = self._transport._wrapped_methods[self._transport.restore_table]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("parent", request.parent),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Wrap the response in an operation future.
+        response = operation.from_gapic(
+            response,
+            self._transport.operations_client,
+            table.Table,
+            metadata_type=bigtable_table_admin.RestoreTableMetadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def copy_backup(self,
+            request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None,
+            *,
+            parent: Optional[str] = None,
+            backup_id: Optional[str] = None,
+            source_backup: Optional[str] = None,
+            expire_time: Optional[timestamp_pb2.Timestamp] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> operation.Operation:
+        r"""Copies a Cloud Bigtable backup to a new backup in the
+        destination cluster located in the destination instance
+        and project.
+
+        Args:
+            request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]):
+                The request object. The request for
+                [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+            parent (str):
+                Required. The name of the destination cluster that will
+                contain the backup copy. The cluster must already
+                exist. Values are of the form:
+                ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+
+                This corresponds to the ``parent`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            backup_id (str):
+                Required. The ID of the new backup. The ``backup_id``
+                and ``parent`` are combined as
+                ``{parent}/backups/{backup_id}`` to create the full
+                backup name, of the form:
+                ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+                This string must be between 1 and 50 characters in
+                length and match the regex ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*``.
+
+                This corresponds to the ``backup_id`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            source_backup (str):
+                Required. The source backup to be copied from. The
+                source backup needs to be in READY state for it to be
+                copied. Copying a copied backup is not allowed. Once
+                CopyBackup is in progress, the source backup cannot be
+                deleted or cleaned up on expiration until CopyBackup is
+                finished. Values are of the form:
+                ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``.
+
+                This corresponds to the ``source_backup`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            expire_time (google.protobuf.timestamp_pb2.Timestamp):
+                Required. The expiration time of the copied
+                backup with microsecond granularity that must be at
+                least 6 hours and at most 30 days from the time the
+                request is received. Once the ``expire_time`` has
+                passed, Cloud Bigtable will delete the backup and free
+                the resources used by the backup.
+
+                This corresponds to the ``expire_time`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+ metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CopyBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CopyBackupRequest): + request = bigtable_table_admin.CopyBackupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if source_backup is not None: + request.source_backup = source_backup + if expire_time is not None: + request.expire_time = expire_time + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.copy_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("parent", request.parent), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CopyBackupMetadata, + ) + + # Done; return the response. + return response + + def get_iam_policy(self, + request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, + *, + resource: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Args: + request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): + The request object. Request message for ``GetIamPolicy`` method. + resource (str): + REQUIRED: The resource for which the + policy is being requested. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. 
+ + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([resource]) + if request is not None and has_flattened_params: + raise ValueError('If the `request` argument is set, then none of ' + 'the individual field arguments should be set.') + + if isinstance(request, dict): + # The request isn't a proto-plus wrapped type, + # so it must be constructed via keyword expansion. + request = iam_policy_pb2.GetIamPolicyRequest(**request) + elif not request: + # Null request, just make one. + request = iam_policy_pb2.GetIamPolicyRequest() + if resource is not None: + request.resource = resource + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("resource", request.resource), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. 
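+        # Editor's note -- a short illustrative read of a table's policy; the
+        # resource name is a placeholder, and ``bindings`` is the repeated
+        # field described in the Returns section above:
+        #
+        #   from google.cloud import bigtable_admin_v2
+        #
+        #   client = bigtable_admin_v2.BigtableTableAdminClient()
+        #   policy = client.get_iam_policy(
+        #       resource="projects/p/instances/i/tables/t")
+        #   for binding in policy.bindings:
+        #       print(binding.role, list(binding.members))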
+ return response + + def set_iam_policy(self, + request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, + *, + resource: Optional[str] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> policy_pb2.Policy: + r"""Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Args: + request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): + The request object. Request message for ``SetIamPolicy`` method. + resource (str): + REQUIRED: The resource for which the + policy is being specified. See the + operation documentation for the + appropriate value for this field. + + This corresponds to the ``resource`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.iam.v1.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which specifies access + controls for Google Cloud resources. + + A Policy is a collection of bindings. A binding binds + one or more members, or principals, to a single role. + Principals can be user accounts, service accounts, + Google groups, and domains (such as G Suite). A role + is a named list of permissions; each role can be an + IAM predefined role or a user-created custom role. + + For some types of Google Cloud resources, a binding + can also specify a condition, which is a logical + expression that allows access to a resource only if + the expression evaluates to true. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the [IAM + documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). + + **JSON example:** + + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` + + **YAML example:** + + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` + + For a description of IAM and its features, see the + [IAM + documentation](\ https://cloud.google.com/iam/docs/). + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. 
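+        # Editor's note -- a hypothetical read-modify-write sketch. Only
+        # ``resource`` is flattened here, so the new policy travels in the
+        # request dict (role and member values are placeholders):
+        #
+        #   resource = "projects/p/instances/i/tables/t"
+        #   policy = client.get_iam_policy(resource=resource)
+        #   policy.bindings.add(role="roles/bigtable.user",
+        #                       members=["user:alice@example.com"])
+        #   policy = client.set_iam_policy(
+        #       request={"resource": resource, "policy": policy})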
+        has_flattened_params = any([resource])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        if isinstance(request, dict):
+            # The request isn't a proto-plus wrapped type,
+            # so it must be constructed via keyword expansion.
+            request = iam_policy_pb2.SetIamPolicyRequest(**request)
+        elif not request:
+            # Null request, just make one.
+            request = iam_policy_pb2.SetIamPolicyRequest()
+            if resource is not None:
+                request.resource = resource
+
+        # Wrap the RPC method; this adds retry and timeout information,
+        # and friendly error handling.
+        rpc = self._transport._wrapped_methods[self._transport.set_iam_policy]
+
+        # Certain fields should be provided within the metadata header;
+        # add these here.
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ("resource", request.resource),
+            )),
+        )
+
+        # Send the request.
+        response = rpc(
+            request,
+            retry=retry,
+            timeout=timeout,
+            metadata=metadata,
+        )
+
+        # Done; return the response.
+        return response
+
+    def test_iam_permissions(self,
+            request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
+            *,
+            resource: Optional[str] = None,
+            permissions: Optional[MutableSequence[str]] = None,
+            retry: OptionalRetry = gapic_v1.method.DEFAULT,
+            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
+            metadata: Sequence[Tuple[str, str]] = (),
+            ) -> iam_policy_pb2.TestIamPermissionsResponse:
+        r"""Returns permissions that the caller has on the
+        specified Table or Backup resource.
+
+        Args:
+            request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]):
+                The request object. Request message for ``TestIamPermissions`` method.
+            resource (str):
+                REQUIRED: The resource for which the
+                policy detail is being requested. See
+                the operation documentation for the
+                appropriate value for this field.
+
+                This corresponds to the ``resource`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            permissions (MutableSequence[str]):
+                The set of permissions to check for the ``resource``.
+                Permissions with wildcards (such as '*' or 'storage.*')
+                are not allowed. For more information see `IAM
+                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
+
+                This corresponds to the ``permissions`` field
+                on the ``request`` instance; if ``request`` is provided, this
+                should not be set.
+            retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                should be retried.
+            timeout (float): The timeout for this request.
+            metadata (Sequence[Tuple[str, str]]): Strings which should be
+                sent along with the request as metadata.
+
+        Returns:
+            google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
+                Response message for TestIamPermissions method.
+        """
+        # Create or coerce a protobuf request object.
+        # Quick check: If we got a request object, we should *not* have
+        # gotten any keyword arguments that map to the request.
+        has_flattened_params = any([resource, permissions])
+        if request is not None and has_flattened_params:
+            raise ValueError('If the `request` argument is set, then none of '
+                             'the individual field arguments should be set.')
+
+        if isinstance(request, dict):
+            # The request isn't a proto-plus wrapped type,
+            # so it must be constructed via keyword expansion.
+            request = iam_policy_pb2.TestIamPermissionsRequest(**request)
+        elif not request:
+            # Null request, just make one.
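+        # Editor's note -- an illustrative permission check; the resource and
+        # permission strings are placeholders, and the response's
+        # ``permissions`` field echoes back the subset the caller holds:
+        #
+        #   resp = client.test_iam_permissions(
+        #       resource="projects/p/instances/i/tables/t",
+        #       permissions=["bigtable.tables.readRows",
+        #                    "bigtable.tables.mutateRows"],
+        #   )
+        #   granted = set(resp.permissions)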
+ request = iam_policy_pb2.TestIamPermissionsRequest() + if resource is not None: + request.resource = resource + if permissions: + request.permissions.extend(permissions) + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(( + ("resource", request.resource), + )), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Done; return the response. + return response + + def __enter__(self) -> "BigtableTableAdminClient": + return self + + def __exit__(self, type, value, traceback): + """Releases underlying transport's resources. + + .. warning:: + ONLY use as a context manager if the transport is NOT shared + with other clients! Exiting the with block will CLOSE the transport + and may cause errors in other clients! + """ + self.transport.close() + + + + + + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +__all__ = ( + "BigtableTableAdminClient", +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py new file mode 100644 index 000000000..adf3d6e23 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -0,0 +1,382 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table + + +class ListTablesPager: + """A pager for iterating through ``list_tables`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and + provides an ``__iter__`` method to iterate through its + ``tables`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListTables`` requests and continue to iterate + through the ``tables`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., bigtable_table_admin.ListTablesResponse], + request: bigtable_table_admin.ListTablesRequest, + response: bigtable_table_admin.ListTablesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. 
+ + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListTablesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_table_admin.ListTablesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[table.Table]: + for page in self.pages: + yield from page.tables + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListTablesAsyncPager: + """A pager for iterating through ``list_tables`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``tables`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListTables`` requests and continue to iterate + through the ``tables`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[bigtable_table_admin.ListTablesResponse]], + request: bigtable_table_admin.ListTablesRequest, + response: bigtable_table_admin.ListTablesResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListTablesRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[bigtable_table_admin.ListTablesResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[table.Table]: + async def async_generator(): + async for page in self.pages: + for response in page.tables: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListSnapshotsPager: + """A pager for iterating through ``list_snapshots`` requests. 
+ + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``snapshots`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListSnapshots`` requests and continue to iterate + through the ``snapshots`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., bigtable_table_admin.ListSnapshotsResponse], + request: bigtable_table_admin.ListSnapshotsRequest, + response: bigtable_table_admin.ListSnapshotsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListSnapshotsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_table_admin.ListSnapshotsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[table.Snapshot]: + for page in self.pages: + yield from page.snapshots + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListSnapshotsAsyncPager: + """A pager for iterating through ``list_snapshots`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``snapshots`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListSnapshots`` requests and continue to iterate + through the ``snapshots`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[bigtable_table_admin.ListSnapshotsResponse]], + request: bigtable_table_admin.ListSnapshotsRequest, + response: bigtable_table_admin.ListSnapshotsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + self._method = method + self._request = bigtable_table_admin.ListSnapshotsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[bigtable_table_admin.ListSnapshotsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[table.Snapshot]: + async def async_generator(): + async for page in self.pages: + for response in page.snapshots: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBackupsPager: + """A pager for iterating through ``list_backups`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and + provides an ``__iter__`` method to iterate through its + ``backups`` field. + + If there are more pages, the ``__iter__`` method will make additional + ``ListBackups`` requests and continue to iterate + through the ``backups`` field on the + corresponding responses. + + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., bigtable_table_admin.ListBackupsResponse], + request: bigtable_table_admin.ListBackupsRequest, + response: bigtable_table_admin.ListBackupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiate the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListBackupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + def pages(self) -> Iterator[bigtable_table_admin.ListBackupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = self._method(self._request, metadata=self._metadata) + yield self._response + + def __iter__(self) -> Iterator[table.Backup]: + for page in self.pages: + yield from page.backups + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) + + +class ListBackupsAsyncPager: + """A pager for iterating through ``list_backups`` requests. + + This class thinly wraps an initial + :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and + provides an ``__aiter__`` method to iterate through its + ``backups`` field. + + If there are more pages, the ``__aiter__`` method will make additional + ``ListBackups`` requests and continue to iterate + through the ``backups`` field on the + corresponding responses. 
+ + All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` + attributes are available on the pager. If multiple requests are made, only + the most recent response is retained, and thus used for attribute lookup. + """ + def __init__(self, + method: Callable[..., Awaitable[bigtable_table_admin.ListBackupsResponse]], + request: bigtable_table_admin.ListBackupsRequest, + response: bigtable_table_admin.ListBackupsResponse, + *, + metadata: Sequence[Tuple[str, str]] = ()): + """Instantiates the pager. + + Args: + method (Callable): The method that was originally called, and + which instantiated this pager. + request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): + The initial request object. + response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): + The initial response object. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + self._method = method + self._request = bigtable_table_admin.ListBackupsRequest(request) + self._response = response + self._metadata = metadata + + def __getattr__(self, name: str) -> Any: + return getattr(self._response, name) + + @property + async def pages(self) -> AsyncIterator[bigtable_table_admin.ListBackupsResponse]: + yield self._response + while self._response.next_page_token: + self._request.page_token = self._response.next_page_token + self._response = await self._method(self._request, metadata=self._metadata) + yield self._response + def __aiter__(self) -> AsyncIterator[table.Backup]: + async def async_generator(): + async for page in self.pages: + for response in page.backups: + yield response + + return async_generator() + + def __repr__(self) -> str: + return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py new file mode 100644 index 000000000..e2256b316 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from collections import OrderedDict +from typing import Dict, Type + +from .base import BigtableTableAdminTransport +from .grpc import BigtableTableAdminGrpcTransport +from .grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport +from .rest import BigtableTableAdminRestTransport +from .rest import BigtableTableAdminRestInterceptor + + +# Compile a registry of transports. 
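+# The keys of this registry are the strings accepted for the client's +# `transport` argument; e.g. (an illustrative lookup, performed by the +# generated client rather than by user code) +# BigtableTableAdminClient(transport='rest') selects +# BigtableTableAdminRestTransport from this mapping.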
+_transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTableAdminTransport]] +_transport_registry['grpc'] = BigtableTableAdminGrpcTransport +_transport_registry['grpc_asyncio'] = BigtableTableAdminGrpcAsyncIOTransport +_transport_registry['rest'] = BigtableTableAdminRestTransport + +__all__ = ( + 'BigtableTableAdminTransport', + 'BigtableTableAdminGrpcTransport', + 'BigtableTableAdminGrpcAsyncIOTransport', + 'BigtableTableAdminRestTransport', + 'BigtableTableAdminRestInterceptor', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py new file mode 100644 index 000000000..702cc4212 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -0,0 +1,571 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import abc +from typing import Awaitable, Callable, Dict, Optional, Sequence, Union + +from google.cloud.bigtable_admin_v2 import gapic_version as package_version + +import google.auth # type: ignore +import google.api_core +from google.api_core import exceptions as core_exceptions +from google.api_core import gapic_v1 +from google.api_core import retry as retries +from google.api_core import operations_v1 +from google.auth import credentials as ga_credentials # type: ignore +from google.oauth2 import service_account # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) + + +class BigtableTableAdminTransport(abc.ABC): + """Abstract transport class for BigtableTableAdmin.""" + + AUTH_SCOPES = ( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', + ) + + DEFAULT_HOST: str = 'bigtableadmin.googleapis.com' + def __init__( + self, *, + host: str = DEFAULT_HOST, + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + 
**kwargs, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A list of scopes. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + """ + + scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} + + # Save the scopes. + self._scopes = scopes + + # If no credentials are provided, then determine the appropriate + # defaults. + if credentials and credentials_file: + raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") + + if credentials_file is not None: + credentials, _ = google.auth.load_credentials_from_file( + credentials_file, + **scopes_kwargs, + quota_project_id=quota_project_id + ) + elif credentials is None: + credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) + # Don't apply audience if the credentials file passed from user. + if hasattr(credentials, "with_gdch_audience"): + credentials = credentials.with_gdch_audience(api_audience if api_audience else host) + + # If the credentials are service account credentials, then always try to use self signed JWT. + if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): + credentials = credentials.with_always_use_jwt_access(True) + + # Save the credentials. + self._credentials = credentials + + # Save the hostname. Default to port 443 (HTTPS) if none is specified. + if ':' not in host: + host += ':443' + self._host = host + + def _prep_wrapped_messages(self, client_info): + # Precompute the wrapped methods. 
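+ # Each entry below pairs an RPC with its default retry and timeout + # policy; a per-call retry=/timeout= argument on the client methods + # overrides these defaults. (The numeric values are emitted as-is by + # the code generator.)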
+ self._wrapped_methods = { + self.create_table: gapic_v1.method.wrap_method( + self.create_table, + default_timeout=300.0, + client_info=client_info, + ), + self.create_table_from_snapshot: gapic_v1.method.wrap_method( + self.create_table_from_snapshot, + default_timeout=None, + client_info=client_info, + ), + self.list_tables: gapic_v1.method.wrap_method( + self.list_tables, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.get_table: gapic_v1.method.wrap_method( + self.get_table, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_table: gapic_v1.method.wrap_method( + self.update_table, + default_timeout=None, + client_info=client_info, + ), + self.delete_table: gapic_v1.method.wrap_method( + self.delete_table, + default_timeout=60.0, + client_info=client_info, + ), + self.undelete_table: gapic_v1.method.wrap_method( + self.undelete_table, + default_timeout=None, + client_info=client_info, + ), + self.modify_column_families: gapic_v1.method.wrap_method( + self.modify_column_families, + default_timeout=300.0, + client_info=client_info, + ), + self.drop_row_range: gapic_v1.method.wrap_method( + self.drop_row_range, + default_timeout=3600.0, + client_info=client_info, + ), + self.generate_consistency_token: gapic_v1.method.wrap_method( + self.generate_consistency_token, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.check_consistency: gapic_v1.method.wrap_method( + self.check_consistency, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.snapshot_table: gapic_v1.method.wrap_method( + self.snapshot_table, + default_timeout=None, + client_info=client_info, + ), + self.get_snapshot: gapic_v1.method.wrap_method( + self.get_snapshot, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.list_snapshots: gapic_v1.method.wrap_method( + self.list_snapshots, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.delete_snapshot: gapic_v1.method.wrap_method( + self.delete_snapshot, + default_timeout=60.0, + client_info=client_info, + ), + self.create_backup: gapic_v1.method.wrap_method( + self.create_backup, + default_timeout=60.0, + client_info=client_info, + ), + self.get_backup: gapic_v1.method.wrap_method( + self.get_backup, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.update_backup: gapic_v1.method.wrap_method( + self.update_backup, + default_timeout=60.0, + client_info=client_info, + ), + self.delete_backup: gapic_v1.method.wrap_method( + self.delete_backup, + default_timeout=60.0, + client_info=client_info, + ), + self.list_backups: gapic_v1.method.wrap_method( + self.list_backups, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.restore_table: gapic_v1.method.wrap_method( + self.restore_table, + default_timeout=60.0, + client_info=client_info, + ), + self.copy_backup: gapic_v1.method.wrap_method( + self.copy_backup, + default_timeout=None, + client_info=client_info, + ), + self.get_iam_policy: gapic_v1.method.wrap_method( + self.get_iam_policy, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + self.set_iam_policy: gapic_v1.method.wrap_method( + self.set_iam_policy, + default_timeout=60.0, + client_info=client_info, + ), + self.test_iam_permissions: gapic_v1.method.wrap_method( + self.test_iam_permissions, + default_retry=retries.Retry( + initial=1.0, + maximum=60.0, + multiplier=2, + predicate=retries.if_exception_type( + core_exceptions.DeadlineExceeded, + core_exceptions.ServiceUnavailable, + ), + deadline=60.0, + ), + default_timeout=60.0, + client_info=client_info, + ), + } + + def close(self): + """Closes resources associated with the transport. + + .. warning:: + Only call this method if the transport is NOT shared + with other clients - this may cause errors in other clients!
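+ + Prefer the client's context-manager form (``with client: ...``); + the client's ``__exit__`` calls this method exactly once on exit.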
+ """ + raise NotImplementedError() + + @property + def operations_client(self): + """Return the client designed to process long-running operations.""" + raise NotImplementedError() + + @property + def create_table(self) -> Callable[ + [bigtable_table_admin.CreateTableRequest], + Union[ + gba_table.Table, + Awaitable[gba_table.Table] + ]]: + raise NotImplementedError() + + @property + def create_table_from_snapshot(self) -> Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def list_tables(self) -> Callable[ + [bigtable_table_admin.ListTablesRequest], + Union[ + bigtable_table_admin.ListTablesResponse, + Awaitable[bigtable_table_admin.ListTablesResponse] + ]]: + raise NotImplementedError() + + @property + def get_table(self) -> Callable[ + [bigtable_table_admin.GetTableRequest], + Union[ + table.Table, + Awaitable[table.Table] + ]]: + raise NotImplementedError() + + @property + def update_table(self) -> Callable[ + [bigtable_table_admin.UpdateTableRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def delete_table(self) -> Callable[ + [bigtable_table_admin.DeleteTableRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def undelete_table(self) -> Callable[ + [bigtable_table_admin.UndeleteTableRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def modify_column_families(self) -> Callable[ + [bigtable_table_admin.ModifyColumnFamiliesRequest], + Union[ + table.Table, + Awaitable[table.Table] + ]]: + raise NotImplementedError() + + @property + def drop_row_range(self) -> Callable[ + [bigtable_table_admin.DropRowRangeRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def generate_consistency_token(self) -> Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + Union[ + bigtable_table_admin.GenerateConsistencyTokenResponse, + Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse] + ]]: + raise NotImplementedError() + + @property + def check_consistency(self) -> Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + Union[ + bigtable_table_admin.CheckConsistencyResponse, + Awaitable[bigtable_table_admin.CheckConsistencyResponse] + ]]: + raise NotImplementedError() + + @property + def snapshot_table(self) -> Callable[ + [bigtable_table_admin.SnapshotTableRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_snapshot(self) -> Callable[ + [bigtable_table_admin.GetSnapshotRequest], + Union[ + table.Snapshot, + Awaitable[table.Snapshot] + ]]: + raise NotImplementedError() + + @property + def list_snapshots(self) -> Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + Union[ + bigtable_table_admin.ListSnapshotsResponse, + Awaitable[bigtable_table_admin.ListSnapshotsResponse] + ]]: + raise NotImplementedError() + + @property + def delete_snapshot(self) -> Callable[ + [bigtable_table_admin.DeleteSnapshotRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def create_backup(self) -> Callable[ + [bigtable_table_admin.CreateBackupRequest], + Union[ + operations_pb2.Operation, 
+ Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_backup(self) -> Callable[ + [bigtable_table_admin.GetBackupRequest], + Union[ + table.Backup, + Awaitable[table.Backup] + ]]: + raise NotImplementedError() + + @property + def update_backup(self) -> Callable[ + [bigtable_table_admin.UpdateBackupRequest], + Union[ + table.Backup, + Awaitable[table.Backup] + ]]: + raise NotImplementedError() + + @property + def delete_backup(self) -> Callable[ + [bigtable_table_admin.DeleteBackupRequest], + Union[ + empty_pb2.Empty, + Awaitable[empty_pb2.Empty] + ]]: + raise NotImplementedError() + + @property + def list_backups(self) -> Callable[ + [bigtable_table_admin.ListBackupsRequest], + Union[ + bigtable_table_admin.ListBackupsResponse, + Awaitable[bigtable_table_admin.ListBackupsResponse] + ]]: + raise NotImplementedError() + + @property + def restore_table(self) -> Callable[ + [bigtable_table_admin.RestoreTableRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def copy_backup(self) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], + Union[ + operations_pb2.Operation, + Awaitable[operations_pb2.Operation] + ]]: + raise NotImplementedError() + + @property + def get_iam_policy(self) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Union[ + policy_pb2.Policy, + Awaitable[policy_pb2.Policy] + ]]: + raise NotImplementedError() + + @property + def set_iam_policy(self) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Union[ + policy_pb2.Policy, + Awaitable[policy_pb2.Policy] + ]]: + raise NotImplementedError() + + @property + def test_iam_permissions(self) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Union[ + iam_policy_pb2.TestIamPermissionsResponse, + Awaitable[iam_policy_pb2.TestIamPermissionsResponse] + ]]: + raise NotImplementedError() + + @property + def kind(self) -> str: + raise NotImplementedError() + + +__all__ = ( + 'BigtableTableAdminTransport', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py new file mode 100644 index 000000000..00c6a6daf --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -0,0 +1,996 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import warnings +from typing import Callable, Dict, Optional, Sequence, Tuple, Union + +from google.api_core import grpc_helpers +from google.api_core import operations_v1 +from google.api_core import gapic_v1 +import google.auth # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore + +import grpc # type: ignore + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO + + +class BigtableTableAdminGrpcTransport(BigtableTableAdminTransport): + """gRPC backend transport for BigtableTableAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. + + It sends protocol buffers over the wire using gRPC (which is built on + top of HTTP/2); the ``grpcio`` package must be installed. + """ + _stubs: Dict[str, Callable] + + def __init__(self, *, + host: str = 'bigtableadmin.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[grpc.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional(Sequence[str])): A list of scopes. This argument is + ignored if ``channel`` is provided. + channel (Optional[grpc.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. 
It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. 
This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @classmethod + def create_channel(cls, + host: str = 'bigtableadmin.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + quota_project_id: Optional[str] = None, + **kwargs) -> grpc.Channel: + """Create and return a gRPC channel object. + Args: + host (Optional[str]): The host for the channel to use. + credentials (Optional[~.Credentials]): The + authorization credentials to attach to requests. These + credentials identify this application to the service. If + none are specified, the client will attempt to ascertain + the credentials from the environment. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is mutually exclusive with credentials. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + kwargs (Optional[dict]): Keyword arguments, which are passed to the + channel creation. + Returns: + grpc.Channel: A gRPC channel object. + + Raises: + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. + """ + + return grpc_helpers.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + @property + def grpc_channel(self) -> grpc.Channel: + """Return the channel designed to connect to this service. + """ + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_table(self) -> Callable[ + [bigtable_table_admin.CreateTableRequest], + gba_table.Table]: + r"""Return a callable for the create table method over gRPC. + + Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Returns: + Callable[[~.CreateTableRequest], + ~.Table]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
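+ # The stub is created once and cached in self._stubs, so repeated + # property accesses return the same callable (the same caching + # pattern repeats for each RPC property below).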
+ if 'create_table' not in self._stubs: + self._stubs['create_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', + request_serializer=bigtable_table_admin.CreateTableRequest.serialize, + response_deserializer=gba_table.Table.deserialize, + ) + return self._stubs['create_table'] + + @property + def create_table_from_snapshot(self) -> Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], + operations_pb2.Operation]: + r"""Return a callable for the create table from snapshot method over gRPC. + + Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.CreateTableFromSnapshotRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_table_from_snapshot' not in self._stubs: + self._stubs['create_table_from_snapshot'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot', + request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_table_from_snapshot'] + + @property + def list_tables(self) -> Callable[ + [bigtable_table_admin.ListTablesRequest], + bigtable_table_admin.ListTablesResponse]: + r"""Return a callable for the list tables method over gRPC. + + Lists all tables served from a specified instance. + + Returns: + Callable[[~.ListTablesRequest], + ~.ListTablesResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tables' not in self._stubs: + self._stubs['list_tables'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', + request_serializer=bigtable_table_admin.ListTablesRequest.serialize, + response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, + ) + return self._stubs['list_tables'] + + @property + def get_table(self) -> Callable[ + [bigtable_table_admin.GetTableRequest], + table.Table]: + r"""Return a callable for the get table method over gRPC. + + Gets metadata information about the specified table. + + Returns: + Callable[[~.GetTableRequest], + ~.Table]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_table' not in self._stubs: + self._stubs['get_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', + request_serializer=bigtable_table_admin.GetTableRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs['get_table'] + + @property + def update_table(self) -> Callable[ + [bigtable_table_admin.UpdateTableRequest], + operations_pb2.Operation]: + r"""Return a callable for the update table method over gRPC. + + Updates a specified table. + + Returns: + Callable[[~.UpdateTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_table' not in self._stubs: + self._stubs['update_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable', + request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_table'] + + @property + def delete_table(self) -> Callable[ + [bigtable_table_admin.DeleteTableRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete table method over gRPC. + + Permanently deletes a specified table and all of its + data. + + Returns: + Callable[[~.DeleteTableRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_table' not in self._stubs: + self._stubs['delete_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', + request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_table'] + + @property + def undelete_table(self) -> Callable[ + [bigtable_table_admin.UndeleteTableRequest], + operations_pb2.Operation]: + r"""Return a callable for the undelete table method over gRPC. + + Restores a specified table which was accidentally + deleted. + + Returns: + Callable[[~.UndeleteTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undelete_table' not in self._stubs: + self._stubs['undelete_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable', + request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undelete_table'] + + @property + def modify_column_families(self) -> Callable[ + [bigtable_table_admin.ModifyColumnFamiliesRequest], + table.Table]: + r"""Return a callable for the modify column families method over gRPC. + + Performs a series of column family modifications on + the specified table. Either all or none of the + modifications will occur before this method returns, but + data requests received prior to that point may see a + table where only some modifications have taken effect. 
+ + Returns: + Callable[[~.ModifyColumnFamiliesRequest], + ~.Table]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'modify_column_families' not in self._stubs: + self._stubs['modify_column_families'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies', + request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs['modify_column_families'] + + @property + def drop_row_range(self) -> Callable[ + [bigtable_table_admin.DropRowRangeRequest], + empty_pb2.Empty]: + r"""Return a callable for the drop row range method over gRPC. + + Permanently drop/delete a row range from a specified + table. The request can specify whether to delete all + rows in a table, or only those that match a particular + prefix. + + Returns: + Callable[[~.DropRowRangeRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'drop_row_range' not in self._stubs: + self._stubs['drop_row_range'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange', + request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['drop_row_range'] + + @property + def generate_consistency_token(self) -> Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + bigtable_table_admin.GenerateConsistencyTokenResponse]: + r"""Return a callable for the generate consistency token method over gRPC. + + Generates a consistency token for a Table, which can + be used in CheckConsistency to check whether mutations + to the table that finished before this call started have + been replicated. The tokens will be available for 90 + days. + + Returns: + Callable[[~.GenerateConsistencyTokenRequest], + ~.GenerateConsistencyTokenResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'generate_consistency_token' not in self._stubs: + self._stubs['generate_consistency_token'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken', + request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, + response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, + ) + return self._stubs['generate_consistency_token'] + + @property + def check_consistency(self) -> Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + bigtable_table_admin.CheckConsistencyResponse]: + r"""Return a callable for the check consistency method over gRPC. + + Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. 
+ + Returns: + Callable[[~.CheckConsistencyRequest], + ~.CheckConsistencyResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'check_consistency' not in self._stubs: + self._stubs['check_consistency'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency', + request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, + response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, + ) + return self._stubs['check_consistency'] + + @property + def snapshot_table(self) -> Callable[ + [bigtable_table_admin.SnapshotTableRequest], + operations_pb2.Operation]: + r"""Return a callable for the snapshot table method over gRPC. + + Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.SnapshotTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'snapshot_table' not in self._stubs: + self._stubs['snapshot_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable', + request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['snapshot_table'] + + @property + def get_snapshot(self) -> Callable[ + [bigtable_table_admin.GetSnapshotRequest], + table.Snapshot]: + r"""Return a callable for the get snapshot method over gRPC. + + Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.GetSnapshotRequest], + ~.Snapshot]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_snapshot' not in self._stubs: + self._stubs['get_snapshot'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot', + request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, + response_deserializer=table.Snapshot.deserialize, + ) + return self._stubs['get_snapshot'] + + @property + def list_snapshots(self) -> Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + bigtable_table_admin.ListSnapshotsResponse]: + r"""Return a callable for the list snapshots method over gRPC. 
+ + Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.ListSnapshotsRequest], + ~.ListSnapshotsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_snapshots' not in self._stubs: + self._stubs['list_snapshots'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots', + request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, + response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, + ) + return self._stubs['list_snapshots'] + + @property + def delete_snapshot(self) -> Callable[ + [bigtable_table_admin.DeleteSnapshotRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete snapshot method over gRPC. + + Permanently deletes the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.DeleteSnapshotRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_snapshot' not in self._stubs: + self._stubs['delete_snapshot'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot', + request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_snapshot'] + + @property + def create_backup(self) -> Callable[ + [bigtable_table_admin.CreateBackupRequest], + operations_pb2.Operation]: + r"""Return a callable for the create backup method over gRPC. + + Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. + The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + Returns: + Callable[[~.CreateBackupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
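+        # Illustrative use of the returned callable (hypothetical values;
+        # the call is synchronous on this transport and returns a raw
+        # long-running operations_pb2.Operation):
+        #
+        #     operation = transport.create_backup(
+        #         bigtable_table_admin.CreateBackupRequest(
+        #             parent='projects/p/instances/i/clusters/c',
+        #             backup_id='my-backup',
+        #             backup=table.Backup(source_table='projects/p/instances/i/tables/t'),
+        #         )
+        #     )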
+ if 'create_backup' not in self._stubs: + self._stubs['create_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup', + request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_backup'] + + @property + def get_backup(self) -> Callable[ + [bigtable_table_admin.GetBackupRequest], + table.Backup]: + r"""Return a callable for the get backup method over gRPC. + + Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Returns: + Callable[[~.GetBackupRequest], + ~.Backup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_backup' not in self._stubs: + self._stubs['get_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup', + request_serializer=bigtable_table_admin.GetBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs['get_backup'] + + @property + def update_backup(self) -> Callable[ + [bigtable_table_admin.UpdateBackupRequest], + table.Backup]: + r"""Return a callable for the update backup method over gRPC. + + Updates a pending or completed Cloud Bigtable Backup. + + Returns: + Callable[[~.UpdateBackupRequest], + ~.Backup]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_backup' not in self._stubs: + self._stubs['update_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup', + request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs['update_backup'] + + @property + def delete_backup(self) -> Callable[ + [bigtable_table_admin.DeleteBackupRequest], + empty_pb2.Empty]: + r"""Return a callable for the delete backup method over gRPC. + + Deletes a pending or completed Cloud Bigtable backup. + + Returns: + Callable[[~.DeleteBackupRequest], + ~.Empty]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_backup' not in self._stubs: + self._stubs['delete_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup', + request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_backup'] + + @property + def list_backups(self) -> Callable[ + [bigtable_table_admin.ListBackupsRequest], + bigtable_table_admin.ListBackupsResponse]: + r"""Return a callable for the list backups method over gRPC. + + Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Returns: + Callable[[~.ListBackupsRequest], + ~.ListBackupsResponse]: + A function that, when called, will call the underlying RPC + on the server. 
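+
+        A short usage sketch (hypothetical cluster path; the transport-level
+        callable returns a single page, without automatic pagination):
+
+        .. code-block:: python
+
+            page = transport.list_backups(
+                bigtable_table_admin.ListBackupsRequest(
+                    parent='projects/p/instances/i/clusters/c',
+                )
+            )
+            names = [backup.name for backup in page.backups]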
+ """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_backups' not in self._stubs: + self._stubs['list_backups'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups', + request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, + response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, + ) + return self._stubs['list_backups'] + + @property + def restore_table(self) -> Callable[ + [bigtable_table_admin.RestoreTableRequest], + operations_pb2.Operation]: + r"""Return a callable for the restore table method over gRPC. + + Create a new table by restoring from a completed backup. The + returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + Returns: + Callable[[~.RestoreTableRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'restore_table' not in self._stubs: + self._stubs['restore_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable', + request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['restore_table'] + + @property + def copy_backup(self) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], + operations_pb2.Operation]: + r"""Return a callable for the copy backup method over gRPC. + + Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Returns: + Callable[[~.CopyBackupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'copy_backup' not in self._stubs: + self._stubs['copy_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup', + request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['copy_backup'] + + @property + def get_iam_policy(self) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + policy_pb2.Policy]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_iam_policy' not in self._stubs: + self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy', + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs['get_iam_policy'] + + @property + def set_iam_policy(self) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + policy_pb2.Policy]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + ~.Policy]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_iam_policy' not in self._stubs: + self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy', + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs['set_iam_policy'] + + @property + def test_iam_permissions(self) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified Table or Backup resource. + + Returns: + Callable[[~.TestIamPermissionsRequest], + ~.TestIamPermissionsResponse]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'test_iam_permissions' not in self._stubs: + self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions', + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs['test_iam_permissions'] + + def close(self): + self.grpc_channel.close() + + @property + def kind(self) -> str: + return "grpc" + + +__all__ = ( + 'BigtableTableAdminGrpcTransport', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py new file mode 100644 index 000000000..bb13389a6 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -0,0 +1,995 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import warnings
+from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
+
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers_async
+from google.api_core import operations_v1
+from google.auth import credentials as ga_credentials  # type: ignore
+from google.auth.transport.grpc import SslCredentials  # type: ignore
+
+import grpc  # type: ignore
+from grpc.experimental import aio  # type: ignore
+
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+from google.cloud.bigtable_admin_v2.types import table
+from google.cloud.bigtable_admin_v2.types import table as gba_table
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+from google.iam.v1 import policy_pb2  # type: ignore
+from google.longrunning import operations_pb2  # type: ignore
+from google.protobuf import empty_pb2  # type: ignore
+from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO
+from .grpc import BigtableTableAdminGrpcTransport
+
+
+class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport):
+    """gRPC AsyncIO backend transport for BigtableTableAdmin.
+
+    Service for creating, configuring, and deleting Cloud
+    Bigtable tables.
+
+    Provides access to the table schemas only, not the data stored
+    within the tables.
+
+    This class defines the same methods as the primary client, so the
+    primary client can load the underlying transport implementation
+    and call it.
+
+    It sends protocol buffers over the wire using gRPC (which is built on
+    top of HTTP/2); the ``grpcio`` package must be installed.
+    """
+
+    _grpc_channel: aio.Channel
+    _stubs: Dict[str, Callable] = {}
+
+    @classmethod
+    def create_channel(cls,
+                       host: str = 'bigtableadmin.googleapis.com',
+                       credentials: Optional[ga_credentials.Credentials] = None,
+                       credentials_file: Optional[str] = None,
+                       scopes: Optional[Sequence[str]] = None,
+                       quota_project_id: Optional[str] = None,
+                       **kwargs) -> aio.Channel:
+        """Create and return a gRPC AsyncIO channel object.
+        Args:
+            host (Optional[str]): The host for the channel to use.
+            credentials (Optional[~.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify this application to the service. If
+                none are specified, the client will attempt to ascertain
+                the credentials from the environment.
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
+                service. These are only used when credentials are not specified and
+                are passed to :func:`google.auth.default`.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            kwargs (Optional[dict]): Keyword arguments, which are passed to the
+                channel creation.
+        Returns:
+            aio.Channel: A gRPC AsyncIO channel object.
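+
+        A minimal usage sketch (hypothetical; with no explicit
+        credentials, they are resolved from the environment):
+
+        .. code-block:: python
+
+            channel = BigtableTableAdminGrpcAsyncIOTransport.create_channel(
+                'bigtableadmin.googleapis.com',
+            )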
+ """ + + return grpc_helpers_async.create_channel( + host, + credentials=credentials, + credentials_file=credentials_file, + quota_project_id=quota_project_id, + default_scopes=cls.AUTH_SCOPES, + scopes=scopes, + default_host=cls.DEFAULT_HOST, + **kwargs + ) + + def __init__(self, *, + host: str = 'bigtableadmin.googleapis.com', + credentials: Optional[ga_credentials.Credentials] = None, + credentials_file: Optional[str] = None, + scopes: Optional[Sequence[str]] = None, + channel: Optional[aio.Channel] = None, + api_mtls_endpoint: Optional[str] = None, + client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, + client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, + quota_project_id: Optional[str] = None, + client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, + always_use_jwt_access: Optional[bool] = False, + api_audience: Optional[str] = None, + ) -> None: + """Instantiate the transport. + + Args: + host (Optional[str]): + The hostname to connect to. + credentials (Optional[google.auth.credentials.Credentials]): The + authorization credentials to attach to requests. These + credentials identify the application to the service; if none + are specified, the client will attempt to ascertain the + credentials from the environment. + This argument is ignored if ``channel`` is provided. + credentials_file (Optional[str]): A file with credentials that can + be loaded with :func:`google.auth.load_credentials_from_file`. + This argument is ignored if ``channel`` is provided. + scopes (Optional[Sequence[str]]): A optional list of scopes needed for this + service. These are only used when credentials are not specified and + are passed to :func:`google.auth.default`. + channel (Optional[aio.Channel]): A ``Channel`` instance through + which to make calls. + api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. + If provided, it overrides the ``host`` argument and tries to create + a mutual TLS channel with client SSL credentials from + ``client_cert_source`` or application default SSL credentials. + client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): + Deprecated. A callback to provide client SSL certificate bytes and + private key bytes, both in PEM format. It is ignored if + ``api_mtls_endpoint`` is None. + ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials + for the grpc channel. It is ignored if ``channel`` is provided. + client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): + A callback to provide client certificate bytes and private key bytes, + both in PEM format. It is used to configure a mutual TLS channel. It is + ignored if ``channel`` or ``ssl_channel_credentials`` is provided. + quota_project_id (Optional[str]): An optional project to use for billing + and quota. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + always_use_jwt_access (Optional[bool]): Whether self signed JWT should + be used for service account credentials. + + Raises: + google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport + creation failed for any reason. + google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` + and ``credentials_file`` are passed. 
+ """ + self._grpc_channel = None + self._ssl_channel_credentials = ssl_channel_credentials + self._stubs: Dict[str, Callable] = {} + self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None + + if api_mtls_endpoint: + warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) + if client_cert_source: + warnings.warn("client_cert_source is deprecated", DeprecationWarning) + + if channel: + # Ignore credentials if a channel was passed. + credentials = False + # If a channel was explicitly provided, set it. + self._grpc_channel = channel + self._ssl_channel_credentials = None + else: + if api_mtls_endpoint: + host = api_mtls_endpoint + + # Create SSL credentials with client_cert_source or application + # default SSL credentials. + if client_cert_source: + cert, key = client_cert_source() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + else: + self._ssl_channel_credentials = SslCredentials().ssl_credentials + + else: + if client_cert_source_for_mtls and not ssl_channel_credentials: + cert, key = client_cert_source_for_mtls() + self._ssl_channel_credentials = grpc.ssl_channel_credentials( + certificate_chain=cert, private_key=key + ) + + # The base transport sets the host, credentials and scopes + super().__init__( + host=host, + credentials=credentials, + credentials_file=credentials_file, + scopes=scopes, + quota_project_id=quota_project_id, + client_info=client_info, + always_use_jwt_access=always_use_jwt_access, + api_audience=api_audience, + ) + + if not self._grpc_channel: + self._grpc_channel = type(self).create_channel( + self._host, + # use the credentials which are saved + credentials=self._credentials, + # Set ``credentials_file`` to ``None`` here as + # the credentials that we saved earlier should be used. + credentials_file=None, + scopes=self._scopes, + ssl_credentials=self._ssl_channel_credentials, + quota_project_id=quota_project_id, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Wrap messages. This must be done after self._grpc_channel exists + self._prep_wrapped_messages(client_info) + + @property + def grpc_channel(self) -> aio.Channel: + """Create the channel designed to connect to this service. + + This property caches on the instance; repeated calls return + the same channel. + """ + # Return the channel from cache. + return self._grpc_channel + + @property + def operations_client(self) -> operations_v1.OperationsAsyncClient: + """Create the client designed to process long-running operations. + + This property caches on the instance; repeated calls return the same + client. + """ + # Quick check: Only create a new client if we do not already have one. + if self._operations_client is None: + self._operations_client = operations_v1.OperationsAsyncClient( + self.grpc_channel + ) + + # Return the client from cache. + return self._operations_client + + @property + def create_table(self) -> Callable[ + [bigtable_table_admin.CreateTableRequest], + Awaitable[gba_table.Table]]: + r"""Return a callable for the create table method over gRPC. + + Creates a new table in the specified instance. + The table can be created with a full set of initial + column families, specified in the request. + + Returns: + Callable[[~.CreateTableRequest], + Awaitable[~.Table]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. 
+ # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_table' not in self._stubs: + self._stubs['create_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', + request_serializer=bigtable_table_admin.CreateTableRequest.serialize, + response_deserializer=gba_table.Table.deserialize, + ) + return self._stubs['create_table'] + + @property + def create_table_from_snapshot(self) -> Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create table from snapshot method over gRPC. + + Creates a new table from the specified snapshot. The + target table must not exist. The snapshot and the table + must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.CreateTableFromSnapshotRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_table_from_snapshot' not in self._stubs: + self._stubs['create_table_from_snapshot'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot', + request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_table_from_snapshot'] + + @property + def list_tables(self) -> Callable[ + [bigtable_table_admin.ListTablesRequest], + Awaitable[bigtable_table_admin.ListTablesResponse]]: + r"""Return a callable for the list tables method over gRPC. + + Lists all tables served from a specified instance. + + Returns: + Callable[[~.ListTablesRequest], + Awaitable[~.ListTablesResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_tables' not in self._stubs: + self._stubs['list_tables'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', + request_serializer=bigtable_table_admin.ListTablesRequest.serialize, + response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, + ) + return self._stubs['list_tables'] + + @property + def get_table(self) -> Callable[ + [bigtable_table_admin.GetTableRequest], + Awaitable[table.Table]]: + r"""Return a callable for the get table method over gRPC. + + Gets metadata information about the specified table. + + Returns: + Callable[[~.GetTableRequest], + Awaitable[~.Table]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
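+        # Illustrative call through the returned stub (hypothetical table
+        # path; on this AsyncIO transport the result must be awaited):
+        #
+        #     response = await transport.get_table(
+        #         bigtable_table_admin.GetTableRequest(
+        #             name='projects/p/instances/i/tables/t',
+        #         )
+        #     )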
+ if 'get_table' not in self._stubs: + self._stubs['get_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', + request_serializer=bigtable_table_admin.GetTableRequest.serialize, + response_deserializer=table.Table.deserialize, + ) + return self._stubs['get_table'] + + @property + def update_table(self) -> Callable[ + [bigtable_table_admin.UpdateTableRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the update table method over gRPC. + + Updates a specified table. + + Returns: + Callable[[~.UpdateTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_table' not in self._stubs: + self._stubs['update_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable', + request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['update_table'] + + @property + def delete_table(self) -> Callable[ + [bigtable_table_admin.DeleteTableRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete table method over gRPC. + + Permanently deletes a specified table and all of its + data. + + Returns: + Callable[[~.DeleteTableRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_table' not in self._stubs: + self._stubs['delete_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', + request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_table'] + + @property + def undelete_table(self) -> Callable[ + [bigtable_table_admin.UndeleteTableRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the undelete table method over gRPC. + + Restores a specified table which was accidentally + deleted. + + Returns: + Callable[[~.UndeleteTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'undelete_table' not in self._stubs: + self._stubs['undelete_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable', + request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['undelete_table'] + + @property + def modify_column_families(self) -> Callable[ + [bigtable_table_admin.ModifyColumnFamiliesRequest], + Awaitable[table.Table]]: + r"""Return a callable for the modify column families method over gRPC. + + Performs a series of column family modifications on + the specified table. 
+        Either all or none of the
+        modifications will occur before this method returns, but
+        data requests received prior to that point may see a
+        table where only some modifications have taken effect.
+
+        Returns:
+            Callable[[~.ModifyColumnFamiliesRequest],
+                    Awaitable[~.Table]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'modify_column_families' not in self._stubs:
+            self._stubs['modify_column_families'] = self.grpc_channel.unary_unary(
+                '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies',
+                request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize,
+                response_deserializer=table.Table.deserialize,
+            )
+        return self._stubs['modify_column_families']
+
+    @property
+    def drop_row_range(self) -> Callable[
+            [bigtable_table_admin.DropRowRangeRequest],
+            Awaitable[empty_pb2.Empty]]:
+        r"""Return a callable for the drop row range method over gRPC.
+
+        Permanently drop/delete a row range from a specified
+        table. The request can specify whether to delete all
+        rows in a table, or only those that match a particular
+        prefix.
+
+        Returns:
+            Callable[[~.DropRowRangeRequest],
+                    Awaitable[~.Empty]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'drop_row_range' not in self._stubs:
+            self._stubs['drop_row_range'] = self.grpc_channel.unary_unary(
+                '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange',
+                request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize,
+                response_deserializer=empty_pb2.Empty.FromString,
+            )
+        return self._stubs['drop_row_range']
+
+    @property
+    def generate_consistency_token(self) -> Callable[
+            [bigtable_table_admin.GenerateConsistencyTokenRequest],
+            Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse]]:
+        r"""Return a callable for the generate consistency token method over gRPC.
+
+        Generates a consistency token for a Table, which can
+        be used in CheckConsistency to check whether mutations
+        to the table that finished before this call started have
+        been replicated. The tokens will be available for 90
+        days.
+
+        Returns:
+            Callable[[~.GenerateConsistencyTokenRequest],
+                    Awaitable[~.GenerateConsistencyTokenResponse]]:
+                A function that, when called, will call the underlying RPC
+                on the server.
+        """
+        # Generate a "stub function" on-the-fly which will actually make
+        # the request.
+        # gRPC handles serialization and deserialization, so we just need
+        # to pass in the functions for each.
+        if 'generate_consistency_token' not in self._stubs:
+            self._stubs['generate_consistency_token'] = self.grpc_channel.unary_unary(
+                '/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken',
+                request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize,
+                response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize,
+            )
+        return self._stubs['generate_consistency_token']
+
+    @property
+    def check_consistency(self) -> Callable[
+            [bigtable_table_admin.CheckConsistencyRequest],
+            Awaitable[bigtable_table_admin.CheckConsistencyResponse]]:
+        r"""Return a callable for the check consistency method over gRPC.
+ + Checks replication consistency based on a consistency + token, that is, if replication has caught up based on + the conditions specified in the token and the check + request. + + Returns: + Callable[[~.CheckConsistencyRequest], + Awaitable[~.CheckConsistencyResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'check_consistency' not in self._stubs: + self._stubs['check_consistency'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency', + request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, + response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, + ) + return self._stubs['check_consistency'] + + @property + def snapshot_table(self) -> Callable[ + [bigtable_table_admin.SnapshotTableRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the snapshot table method over gRPC. + + Creates a new snapshot in the specified cluster from + the specified source table. The cluster and the table + must be in the same instance. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.SnapshotTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'snapshot_table' not in self._stubs: + self._stubs['snapshot_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable', + request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['snapshot_table'] + + @property + def get_snapshot(self) -> Callable[ + [bigtable_table_admin.GetSnapshotRequest], + Awaitable[table.Snapshot]]: + r"""Return a callable for the get snapshot method over gRPC. + + Gets metadata information about the specified + snapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.GetSnapshotRequest], + Awaitable[~.Snapshot]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'get_snapshot' not in self._stubs: + self._stubs['get_snapshot'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot', + request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, + response_deserializer=table.Snapshot.deserialize, + ) + return self._stubs['get_snapshot'] + + @property + def list_snapshots(self) -> Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + Awaitable[bigtable_table_admin.ListSnapshotsResponse]]: + r"""Return a callable for the list snapshots method over gRPC. + + Lists all snapshots associated with the specified + cluster. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.ListSnapshotsRequest], + Awaitable[~.ListSnapshotsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_snapshots' not in self._stubs: + self._stubs['list_snapshots'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots', + request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, + response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, + ) + return self._stubs['list_snapshots'] + + @property + def delete_snapshot(self) -> Callable[ + [bigtable_table_admin.DeleteSnapshotRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete snapshot method over gRPC. + + Permanently deletes the specified snapshot. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + Returns: + Callable[[~.DeleteSnapshotRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'delete_snapshot' not in self._stubs: + self._stubs['delete_snapshot'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot', + request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_snapshot'] + + @property + def create_backup(self) -> Callable[ + [bigtable_table_admin.CreateBackupRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the create backup method over gRPC. + + Starts creating a new Cloud Bigtable Backup. The returned backup + [long-running operation][google.longrunning.Operation] can be + used to track creation of the backup. The + [metadata][google.longrunning.Operation.metadata] field type is + [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. 
+ The [response][google.longrunning.Operation.response] field type + is [Backup][google.bigtable.admin.v2.Backup], if successful. + Cancelling the returned operation will stop the creation and + delete the backup. + + Returns: + Callable[[~.CreateBackupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'create_backup' not in self._stubs: + self._stubs['create_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup', + request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['create_backup'] + + @property + def get_backup(self) -> Callable[ + [bigtable_table_admin.GetBackupRequest], + Awaitable[table.Backup]]: + r"""Return a callable for the get backup method over gRPC. + + Gets metadata on a pending or completed Cloud + Bigtable Backup. + + Returns: + Callable[[~.GetBackupRequest], + Awaitable[~.Backup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_backup' not in self._stubs: + self._stubs['get_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup', + request_serializer=bigtable_table_admin.GetBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs['get_backup'] + + @property + def update_backup(self) -> Callable[ + [bigtable_table_admin.UpdateBackupRequest], + Awaitable[table.Backup]]: + r"""Return a callable for the update backup method over gRPC. + + Updates a pending or completed Cloud Bigtable Backup. + + Returns: + Callable[[~.UpdateBackupRequest], + Awaitable[~.Backup]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'update_backup' not in self._stubs: + self._stubs['update_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup', + request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, + response_deserializer=table.Backup.deserialize, + ) + return self._stubs['update_backup'] + + @property + def delete_backup(self) -> Callable[ + [bigtable_table_admin.DeleteBackupRequest], + Awaitable[empty_pb2.Empty]]: + r"""Return a callable for the delete backup method over gRPC. + + Deletes a pending or completed Cloud Bigtable backup. + + Returns: + Callable[[~.DeleteBackupRequest], + Awaitable[~.Empty]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'delete_backup' not in self._stubs: + self._stubs['delete_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup', + request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, + response_deserializer=empty_pb2.Empty.FromString, + ) + return self._stubs['delete_backup'] + + @property + def list_backups(self) -> Callable[ + [bigtable_table_admin.ListBackupsRequest], + Awaitable[bigtable_table_admin.ListBackupsResponse]]: + r"""Return a callable for the list backups method over gRPC. + + Lists Cloud Bigtable backups. Returns both completed + and pending backups. + + Returns: + Callable[[~.ListBackupsRequest], + Awaitable[~.ListBackupsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'list_backups' not in self._stubs: + self._stubs['list_backups'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups', + request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, + response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, + ) + return self._stubs['list_backups'] + + @property + def restore_table(self) -> Callable[ + [bigtable_table_admin.RestoreTableRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the restore table method over gRPC. + + Create a new table by restoring from a completed backup. The + returned table [long-running + operation][google.longrunning.Operation] can be used to track + the progress of the operation, and to cancel it. The + [metadata][google.longrunning.Operation.metadata] field type is + [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. + The [response][google.longrunning.Operation.response] type is + [Table][google.bigtable.admin.v2.Table], if successful. + + Returns: + Callable[[~.RestoreTableRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'restore_table' not in self._stubs: + self._stubs['restore_table'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable', + request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['restore_table'] + + @property + def copy_backup(self) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], + Awaitable[operations_pb2.Operation]]: + r"""Return a callable for the copy backup method over gRPC. + + Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Returns: + Callable[[~.CopyBackupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
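+        # Note that awaiting the returned callable yields a raw
+        # operations_pb2.Operation; progress can then be polled through
+        # the transport's operations client, e.g. (hypothetical sketch):
+        #
+        #     op = await transport.copy_backup(request)
+        #     latest = await transport.operations_client.get_operation(op.name)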
+ if 'copy_backup' not in self._stubs: + self._stubs['copy_backup'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup', + request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs['copy_backup'] + + @property + def get_iam_policy(self) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + Awaitable[policy_pb2.Policy]]: + r"""Return a callable for the get iam policy method over gRPC. + + Gets the access control policy for a Table or Backup + resource. Returns an empty policy if the resource exists + but does not have a policy set. + + Returns: + Callable[[~.GetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'get_iam_policy' not in self._stubs: + self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy', + request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs['get_iam_policy'] + + @property + def set_iam_policy(self) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + Awaitable[policy_pb2.Policy]]: + r"""Return a callable for the set iam policy method over gRPC. + + Sets the access control policy on a Table or Backup + resource. Replaces any existing policy. + + Returns: + Callable[[~.SetIamPolicyRequest], + Awaitable[~.Policy]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. + if 'set_iam_policy' not in self._stubs: + self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy', + request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, + response_deserializer=policy_pb2.Policy.FromString, + ) + return self._stubs['set_iam_policy'] + + @property + def test_iam_permissions(self) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + Awaitable[iam_policy_pb2.TestIamPermissionsResponse]]: + r"""Return a callable for the test iam permissions method over gRPC. + + Returns permissions that the caller has on the + specified Table or Backup resource. + + Returns: + Callable[[~.TestIamPermissionsRequest], + Awaitable[~.TestIamPermissionsResponse]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if 'test_iam_permissions' not in self._stubs: + self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary( + '/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions', + request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, + response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, + ) + return self._stubs['test_iam_permissions'] + + def close(self): + return self.grpc_channel.close() + + +__all__ = ( + 'BigtableTableAdminGrpcAsyncIOTransport', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py new file mode 100644 index 000000000..a9b40218e --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -0,0 +1,3310 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +from google.auth.transport.requests import AuthorizedSession # type: ignore +import json # type: ignore +import grpc # type: ignore +from google.auth.transport.grpc import SslCredentials # type: ignore +from google.auth import credentials as ga_credentials # type: ignore +from google.api_core import exceptions as core_exceptions +from google.api_core import retry as retries +from google.api_core import rest_helpers +from google.api_core import rest_streaming +from google.api_core import path_template +from google.api_core import gapic_v1 + +from google.protobuf import json_format +from google.api_core import operations_v1 +from requests import __version__ as requests_version +import dataclasses +import re +from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union +import warnings + +try: + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] +except AttributeError: # pragma: NO COVER + OptionalRetry = Union[retries.Retry, object] # type: ignore + + +from google.cloud.bigtable_admin_v2.types import bigtable_table_admin +from google.cloud.bigtable_admin_v2.types import table +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore + +from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO + + +DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( + gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, + grpc_version=None, + rest_version=requests_version, +) + + +class BigtableTableAdminRestInterceptor: + """Interceptor for BigtableTableAdmin. + + Interceptors are used to manipulate requests, request metadata, and responses + in arbitrary ways. 
+    Example use cases include:
+    * Logging
+    * Verifying requests according to service or custom semantics
+    * Stripping extraneous information from responses
+
+    These use cases and more can be enabled by injecting an
+    instance of a custom subclass when constructing the BigtableTableAdminRestTransport.
+
+    .. code-block:: python
+        class MyCustomBigtableTableAdminInterceptor(BigtableTableAdminRestInterceptor):
+            def pre_check_consistency(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_check_consistency(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_copy_backup(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_copy_backup(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_create_backup(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_create_backup(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_create_table(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_create_table(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_create_table_from_snapshot(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_create_table_from_snapshot(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_delete_backup(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def pre_delete_snapshot(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def pre_delete_table(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def pre_drop_row_range(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def pre_generate_consistency_token(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_generate_consistency_token(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_get_backup(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_get_backup(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_get_iam_policy(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_get_iam_policy(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_get_snapshot(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_get_snapshot(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_get_table(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_get_table(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_list_backups(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_list_backups(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_list_snapshots(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_list_snapshots(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_list_tables(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_list_tables(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_modify_column_families(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_modify_column_families(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_restore_table(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_restore_table(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_set_iam_policy(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_set_iam_policy(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_snapshot_table(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_snapshot_table(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_test_iam_permissions(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_test_iam_permissions(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_undelete_table(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_undelete_table(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_update_backup(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_update_backup(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+            def pre_update_table(self, request, metadata):
+                logging.log(f"Received request: {request}")
+                return request, metadata
+
+            def post_update_table(self, response):
+                logging.log(f"Received response: {response}")
+                return response
+
+        transport = BigtableTableAdminRestTransport(interceptor=MyCustomBigtableTableAdminInterceptor())
+        client = BigtableTableAdminClient(transport=transport)
+
+
+    """
+    def pre_check_consistency(self, request: bigtable_table_admin.CheckConsistencyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CheckConsistencyRequest, Sequence[Tuple[str, str]]]:
+        """Pre-rpc interceptor for check_consistency
+
+        Override in a subclass to manipulate the request or metadata
+        before they are sent to the BigtableTableAdmin server.
+        """
+        return request, metadata
+
+    def post_check_consistency(self, response: bigtable_table_admin.CheckConsistencyResponse) -> bigtable_table_admin.CheckConsistencyResponse:
+        """Post-rpc interceptor for check_consistency
+
+        Override in a subclass to manipulate the response
+        after it is returned by the BigtableTableAdmin server but before
+        it is returned to user code.
+ """ + return response + def pre_copy_backup(self, request: bigtable_table_admin.CopyBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for copy_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_copy_backup(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for copy_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_create_backup(self, request: bigtable_table_admin.CreateBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CreateBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_create_backup(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for create_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_create_table(self, request: bigtable_table_admin.CreateTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_create_table(self, response: gba_table.Table) -> gba_table.Table: + """Post-rpc interceptor for create_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_create_table_from_snapshot(self, request: bigtable_table_admin.CreateTableFromSnapshotRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CreateTableFromSnapshotRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for create_table_from_snapshot + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_create_table_from_snapshot(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for create_table_from_snapshot + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_delete_backup(self, request: bigtable_table_admin.DeleteBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.DeleteBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. 
+ """ + return request, metadata + + def pre_delete_snapshot(self, request: bigtable_table_admin.DeleteSnapshotRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.DeleteSnapshotRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_snapshot + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def pre_delete_table(self, request: bigtable_table_admin.DeleteTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for delete_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def pre_drop_row_range(self, request: bigtable_table_admin.DropRowRangeRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.DropRowRangeRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for drop_row_range + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def pre_generate_consistency_token(self, request: bigtable_table_admin.GenerateConsistencyTokenRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.GenerateConsistencyTokenRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for generate_consistency_token + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_generate_consistency_token(self, response: bigtable_table_admin.GenerateConsistencyTokenResponse) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + """Post-rpc interceptor for generate_consistency_token + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_get_backup(self, request: bigtable_table_admin.GetBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_backup(self, response: table.Backup) -> table.Backup: + """Post-rpc interceptor for get_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_get_iam_policy(self, request: iam_policy_pb2.GetIamPolicyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for get_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. 
+ """ + return response + def pre_get_snapshot(self, request: bigtable_table_admin.GetSnapshotRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_snapshot + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_snapshot(self, response: table.Snapshot) -> table.Snapshot: + """Post-rpc interceptor for get_snapshot + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_get_table(self, request: bigtable_table_admin.GetTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for get_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_get_table(self, response: table.Table) -> table.Table: + """Post-rpc interceptor for get_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_list_backups(self, request: bigtable_table_admin.ListBackupsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_backups + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_list_backups(self, response: bigtable_table_admin.ListBackupsResponse) -> bigtable_table_admin.ListBackupsResponse: + """Post-rpc interceptor for list_backups + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_list_snapshots(self, request: bigtable_table_admin.ListSnapshotsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.ListSnapshotsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_snapshots + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_list_snapshots(self, response: bigtable_table_admin.ListSnapshotsResponse) -> bigtable_table_admin.ListSnapshotsResponse: + """Post-rpc interceptor for list_snapshots + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_list_tables(self, request: bigtable_table_admin.ListTablesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for list_tables + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. 
+ """ + return request, metadata + + def post_list_tables(self, response: bigtable_table_admin.ListTablesResponse) -> bigtable_table_admin.ListTablesResponse: + """Post-rpc interceptor for list_tables + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_modify_column_families(self, request: bigtable_table_admin.ModifyColumnFamiliesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.ModifyColumnFamiliesRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for modify_column_families + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_modify_column_families(self, response: table.Table) -> table.Table: + """Post-rpc interceptor for modify_column_families + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_restore_table(self, request: bigtable_table_admin.RestoreTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.RestoreTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for restore_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_restore_table(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for restore_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_set_iam_policy(self, request: iam_policy_pb2.SetIamPolicyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: + """Post-rpc interceptor for set_iam_policy + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_snapshot_table(self, request: bigtable_table_admin.SnapshotTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.SnapshotTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for snapshot_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_snapshot_table(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for snapshot_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. 
+ """ + return response + def pre_test_iam_permissions(self, request: iam_policy_pb2.TestIamPermissionsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_test_iam_permissions(self, response: iam_policy_pb2.TestIamPermissionsResponse) -> iam_policy_pb2.TestIamPermissionsResponse: + """Post-rpc interceptor for test_iam_permissions + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_undelete_table(self, request: bigtable_table_admin.UndeleteTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.UndeleteTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for undelete_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_undelete_table(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for undelete_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_update_backup(self, request: bigtable_table_admin.UpdateBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.UpdateBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_update_backup(self, response: table.Backup) -> table.Backup: + """Post-rpc interceptor for update_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_update_table(self, request: bigtable_table_admin.UpdateTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for update_table + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_update_table(self, response: operations_pb2.Operation) -> operations_pb2.Operation: + """Post-rpc interceptor for update_table + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + + +@dataclasses.dataclass +class BigtableTableAdminRestStub: + _session: AuthorizedSession + _host: str + _interceptor: BigtableTableAdminRestInterceptor + + +class BigtableTableAdminRestTransport(BigtableTableAdminTransport): + """REST backend transport for BigtableTableAdmin. + + Service for creating, configuring, and deleting Cloud + Bigtable tables. + + Provides access to the table schemas only, not the data stored + within the tables. + + This class defines the same methods as the primary client, so the + primary client can load the underlying transport implementation + and call it. 
+
+    It sends JSON representations of protocol buffers over HTTP/1.1
+
+    """
+
+    def __init__(self, *,
+            host: str = 'bigtableadmin.googleapis.com',
+            credentials: Optional[ga_credentials.Credentials] = None,
+            credentials_file: Optional[str] = None,
+            scopes: Optional[Sequence[str]] = None,
+            client_cert_source_for_mtls: Optional[Callable[[
+                ], Tuple[bytes, bytes]]] = None,
+            quota_project_id: Optional[str] = None,
+            client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
+            always_use_jwt_access: Optional[bool] = False,
+            url_scheme: str = 'https',
+            interceptor: Optional[BigtableTableAdminRestInterceptor] = None,
+            api_audience: Optional[str] = None,
+            ) -> None:
+        """Instantiate the transport.
+
+        Args:
+            host (Optional[str]):
+                The hostname to connect to.
+            credentials (Optional[google.auth.credentials.Credentials]): The
+                authorization credentials to attach to requests. These
+                credentials identify the application to the service; if none
+                are specified, the client will attempt to ascertain the
+                credentials from the environment.
+
+            credentials_file (Optional[str]): A file with credentials that can
+                be loaded with :func:`google.auth.load_credentials_from_file`.
+                This argument is ignored if ``channel`` is provided.
+            scopes (Optional(Sequence[str])): A list of scopes. This argument is
+                ignored if ``channel`` is provided.
+            client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client
+                certificate to configure mutual TLS HTTP channel. It is ignored
+                if ``channel`` is provided.
+            quota_project_id (Optional[str]): An optional project to use for billing
+                and quota.
+            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
+                The client info used to send a user-agent string along with
+                API requests. If ``None``, then default info will be used.
+                Generally, you only need to set this if you are developing
+                your own client library.
+            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
+                be used for service account credentials.
+            url_scheme: the protocol scheme for the API endpoint. Normally
+                "https", but for testing or local servers,
+                "http" can be specified.
+        """
+        # Run the base constructor
+        # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc.
+        # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the
+        # credentials object
+        maybe_url_match = re.match("^(?P<scheme>http(?:s)?://)?(?P<host>.*)$", host)
+        if maybe_url_match is None:
+            raise ValueError(f"Unexpected hostname structure: {host}")  # pragma: NO COVER
+
+        url_match_items = maybe_url_match.groupdict()
+
+        host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host
+
+        super().__init__(
+            host=host,
+            credentials=credentials,
+            client_info=client_info,
+            always_use_jwt_access=always_use_jwt_access,
+            api_audience=api_audience
+        )
+        self._session = AuthorizedSession(
+            self._credentials, default_host=self.DEFAULT_HOST)
+        self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None
+        if client_cert_source_for_mtls:
+            self._session.configure_mtls_channel(client_cert_source_for_mtls)
+        self._interceptor = interceptor or BigtableTableAdminRestInterceptor()
+        self._prep_wrapped_messages(client_info)
+
+    @property
+    def operations_client(self) -> operations_v1.AbstractOperationsClient:
+        """Create the client designed to process long-running operations.
+
+        This property caches on the instance; repeated calls return the same
+        client.
+        """
+        # Only create a new client if we do not already have one.
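+        # A minimal usage sketch (illustrative only, not generated output;
+        # it assumes Application Default Credentials are available in the
+        # environment):
+        #
+        #     transport = BigtableTableAdminRestTransport()
+        #     op = transport.operations_client.get_operation(
+        #         name="operations/projects/<project>/...")
+        #
+        # Because the client is cached below, only the first access pays the
+        # cost of constructing the operations REST transport.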
+ if self._operations_client is None: + http_options: Dict[str, List[Dict[str, str]]] = { + 'google.longrunning.Operations.CancelOperation': [ + { + 'method': 'post', + 'uri': '/v2/{name=operations/**}:cancel', + }, + ], + 'google.longrunning.Operations.DeleteOperation': [ + { + 'method': 'delete', + 'uri': '/v2/{name=operations/**}', + }, + ], + 'google.longrunning.Operations.GetOperation': [ + { + 'method': 'get', + 'uri': '/v2/{name=operations/**}', + }, + ], + 'google.longrunning.Operations.ListOperations': [ + { + 'method': 'get', + 'uri': '/v2/{name=operations/projects/**}/operations', + }, + ], + } + + rest_transport = operations_v1.OperationsRestTransport( + host=self._host, + # use the credentials which are saved + credentials=self._credentials, + scopes=self._scopes, + http_options=http_options, + path_prefix="v2") + + self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) + + # Return the client from cache. + return self._operations_client + + class _CheckConsistency(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CheckConsistency") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.CheckConsistencyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable_table_admin.CheckConsistencyResponse: + r"""Call the check consistency method over HTTP. + + Args: + request (~.bigtable_table_admin.CheckConsistencyRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ + Returns: + ~.bigtable_table_admin.CheckConsistencyResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{name=projects/*/instances/*/tables/*}:checkConsistency', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_check_consistency(request, metadata) + pb_request = bigtable_table_admin.CheckConsistencyRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.CheckConsistencyResponse() + pb_resp = bigtable_table_admin.CheckConsistencyResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_consistency(resp) + return resp + + class _CopyBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CopyBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.CopyBackupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the copy backup method over HTTP. + + Args: + request (~.bigtable_table_admin.CopyBackupRequest): + The request object. The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_copy_backup(request, metadata) + pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_copy_backup(resp) + return resp + + class _CreateBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CreateBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "backupId" : "", } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.CreateBackupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the create backup method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateBackupRequest): + The request object. The request for + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/backups', + 'body': 'backup', + }, + ] + request, metadata = self._interceptor.pre_create_backup(request, metadata) + pb_request = bigtable_table_admin.CreateBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_backup(resp) + return resp + + class _CreateTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CreateTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.CreateTableRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> gba_table.Table: + r"""Call the create table method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.gba_table.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{parent=projects/*/instances/*}/tables', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_create_table(request, metadata) + pb_request = bigtable_table_admin.CreateTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = gba_table.Table() + pb_resp = gba_table.Table.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table(resp) + return resp + + class _CreateTableFromSnapshot(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CreateTableFromSnapshot") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.CreateTableFromSnapshotRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the create table from + snapshot method over HTTP. + + Args: + request (~.bigtable_table_admin.CreateTableFromSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_create_table_from_snapshot(request, metadata) + pb_request = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table_from_snapshot(resp) + return resp + + class _DeleteBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("DeleteBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.DeleteBackupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ): + r"""Call the delete backup method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteBackupRequest): + The request object. The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v2/{name=projects/*/instances/*/clusters/*/backups/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_backup(request, metadata) + pb_request = bigtable_table_admin.DeleteBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteSnapshot(BigtableTableAdminRestStub): + def __hash__(self): + return hash("DeleteSnapshot") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.DeleteSnapshotRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ): + r"""Call the delete snapshot method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_snapshot(request, metadata) + pb_request = bigtable_table_admin.DeleteSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DeleteTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("DeleteTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.DeleteTableRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ): + r"""Call the delete table method over HTTP. + + Args: + request (~.bigtable_table_admin.DeleteTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'delete', + 'uri': '/v2/{name=projects/*/instances/*/tables/*}', + }, + ] + request, metadata = self._interceptor.pre_delete_table(request, metadata) + pb_request = bigtable_table_admin.DeleteTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _DropRowRange(BigtableTableAdminRestStub): + def __hash__(self): + return hash("DropRowRange") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.DropRowRangeRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ): + r"""Call the drop row range method over HTTP. + + Args: + request (~.bigtable_table_admin.DropRowRangeRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{name=projects/*/instances/*/tables/*}:dropRowRange', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_drop_row_range(request, metadata) + pb_request = bigtable_table_admin.DropRowRangeRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + class _GenerateConsistencyToken(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GenerateConsistencyToken") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.GenerateConsistencyTokenRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: + r"""Call the generate consistency + token method over HTTP. + + Args: + request (~.bigtable_table_admin.GenerateConsistencyTokenRequest): + The request object. 
Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_table_admin.GenerateConsistencyTokenResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_generate_consistency_token(request, metadata) + pb_request = bigtable_table_admin.GenerateConsistencyTokenRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.GenerateConsistencyTokenResponse() + pb_resp = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_consistency_token(resp) + return resp + + class _GetBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GetBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.GetBackupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> table.Backup: + r"""Call the get backup method over HTTP. + + Args: + request (~.bigtable_table_admin.GetBackupRequest): + The request object. The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. 
+
+            Returns:
+                ~.table.Backup:
+                    A backup of a Cloud Bigtable table.
+            """
+
+            http_options: List[Dict[str, str]] = [{
+                'method': 'get',
+                'uri': '/v2/{name=projects/*/instances/*/clusters/*/backups/*}',
+            },
+            ]
+            request, metadata = self._interceptor.pre_get_backup(request, metadata)
+            pb_request = bigtable_table_admin.GetBackupRequest.pb(request)
+            transcoded_request = path_template.transcode(http_options, pb_request)
+
+            uri = transcoded_request['uri']
+            method = transcoded_request['method']
+
+            # Jsonify the query params
+            query_params = json.loads(json_format.MessageToJson(
+                transcoded_request['query_params'],
+                including_default_value_fields=False,
+                use_integers_for_enums=True,
+            ))
+            query_params.update(self._get_unset_required_fields(query_params))
+
+            query_params["$alt"] = "json;enum-encoding=int"
+
+            # Send the request
+            headers = dict(metadata)
+            headers['Content-Type'] = 'application/json'
+            response = getattr(self._session, method)(
+                "{host}{uri}".format(host=self._host, uri=uri),
+                timeout=timeout,
+                headers=headers,
+                params=rest_helpers.flatten_query_params(query_params, strict=True),
+                )
+
+            # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception
+            # subclass.
+            if response.status_code >= 400:
+                raise core_exceptions.from_http_response(response)
+
+            # Return the response
+            resp = table.Backup()
+            pb_resp = table.Backup.pb(resp)
+
+            json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True)
+            resp = self._interceptor.post_get_backup(resp)
+            return resp
+
+    class _GetIamPolicy(BigtableTableAdminRestStub):
+        def __hash__(self):
+            return hash("GetIamPolicy")
+
+        __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {
+        }
+
+        @classmethod
+        def _get_unset_required_fields(cls, message_dict):
+            return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict}
+
+        def __call__(self,
+                request: iam_policy_pb2.GetIamPolicyRequest, *,
+                retry: OptionalRetry=gapic_v1.method.DEFAULT,
+                timeout: Optional[float]=None,
+                metadata: Sequence[Tuple[str, str]]=(),
+                ) -> policy_pb2.Policy:
+            r"""Call the get iam policy method over HTTP.
+
+            Args:
+                request (~.iam_policy_pb2.GetIamPolicyRequest):
+                    The request object. Request message for ``GetIamPolicy`` method.
+                retry (google.api_core.retry.Retry): Designation of what errors, if any,
+                    should be retried.
+                timeout (float): The timeout for this request.
+                metadata (Sequence[Tuple[str, str]]): Strings which should be
+                    sent along with the request as metadata.
+
+            Returns:
+                ~.policy_pb2.Policy:
+                    An Identity and Access Management (IAM) policy, which
+                    specifies access controls for Google Cloud resources.
+
+                    A ``Policy`` is a collection of ``bindings``. A
+                    ``binding`` binds one or more ``members``, or
+                    principals, to a single ``role``. Principals can be user
+                    accounts, service accounts, Google groups, and domains
+                    (such as G Suite). A ``role`` is a named list of
+                    permissions; each ``role`` can be an IAM predefined role
+                    or a user-created custom role.
+
+                    For some types of Google Cloud resources, a ``binding``
+                    can also specify a ``condition``, which is a logical
+                    expression that allows access to a resource only if the
+                    expression evaluates to ``true``. A condition can add
+                    constraints based on attributes of the request, the
+                    resource, or both. To learn which resources support
+                    conditions in their IAM policies, see the `IAM
+                    documentation <https://cloud.google.com/iam/help/conditions/resource-policies>`__.
+ + **JSON example:** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } + + **YAML example:** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 + + For a description of IAM and its features, see the `IAM + documentation `__. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy', + 'body': '*', + }, +{ + 'method': 'post', + 'uri': '/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
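+ # `core_exceptions.from_http_response` inspects the status code and the
+ # JSON error payload and returns the matching `GoogleAPICallError`
+ # subclass (for example, 403 -> `PermissionDenied`, 404 -> `NotFound`).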
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = policy_pb2.Policy() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + return resp + + class _GetSnapshot(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GetSnapshot") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.GetSnapshotRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> table.Snapshot: + r"""Call the get snapshot method over HTTP. + + Args: + request (~.bigtable_table_admin.GetSnapshotRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table.Snapshot: + A snapshot of a table at a particular + time. A snapshot can be used as a + checkpoint for data restoration or a + data source for a new table. + + Note: This is a private alpha release of + Cloud Bigtable snapshots. This feature + is not currently available to most Cloud + Bigtable customers. This feature might + be changed in backward-incompatible ways + and is not recommended for production + use. It is not subject to any SLA or + deprecation policy. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}', + }, + ] + request, metadata = self._interceptor.pre_get_snapshot(request, metadata) + pb_request = bigtable_table_admin.GetSnapshotRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
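+ # `table.Snapshot.pb(resp)` returns the protobuf message backing the
+ # proto-plus wrapper, so parsing the JSON payload into `pb_resp` below
+ # populates `resp` in place.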
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Snapshot() + pb_resp = table.Snapshot.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_snapshot(resp) + return resp + + class _GetTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("GetTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.GetTableRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> table.Table: + r"""Call the get table method over HTTP. + + Args: + request (~.bigtable_table_admin.GetTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{name=projects/*/instances/*/tables/*}', + }, + ] + request, metadata = self._interceptor.pre_get_table(request, metadata) + pb_request = bigtable_table_admin.GetTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
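+ # The `post_get_table` hook gives a custom REST interceptor a chance to
+ # inspect or replace the parsed response before it is returned.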
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Table() + pb_resp = table.Table.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_table(resp) + return resp + + class _ListBackups(BigtableTableAdminRestStub): + def __hash__(self): + return hash("ListBackups") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.ListBackupsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable_table_admin.ListBackupsResponse: + r"""Call the list backups method over HTTP. + + Args: + request (~.bigtable_table_admin.ListBackupsRequest): + The request object. The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_table_admin.ListBackupsResponse: + The response for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/backups', + }, + ] + request, metadata = self._interceptor.pre_list_backups(request, metadata) + pb_request = bigtable_table_admin.ListBackupsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
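+ # The transport returns the raw `ListBackupsResponse`; page iteration is
+ # handled a level up, in the service client, whose pager re-issues this
+ # call with each `next_page_token` until the listing is exhausted.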
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListBackupsResponse() + pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_backups(resp) + return resp + + class _ListSnapshots(BigtableTableAdminRestStub): + def __hash__(self): + return hash("ListSnapshots") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.ListSnapshotsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable_table_admin.ListSnapshotsResponse: + r"""Call the list snapshots method over HTTP. + + Args: + request (~.bigtable_table_admin.ListSnapshotsRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_table_admin.ListSnapshotsResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/snapshots', + }, + ] + request, metadata = self._interceptor.pre_list_snapshots(request, metadata) + pb_request = bigtable_table_admin.ListSnapshotsRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
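+ # `ignore_unknown_fields=True` keeps parsing forward-compatible: fields
+ # added to the API after this client was generated are skipped rather
+ # than raising a parse error.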
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListSnapshotsResponse() + pb_resp = bigtable_table_admin.ListSnapshotsResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_snapshots(resp) + return resp + + class _ListTables(BigtableTableAdminRestStub): + def __hash__(self): + return hash("ListTables") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.ListTablesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> bigtable_table_admin.ListTablesResponse: + r"""Call the list tables method over HTTP. + + Args: + request (~.bigtable_table_admin.ListTablesRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.bigtable_table_admin.ListTablesResponse: + Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'get', + 'uri': '/v2/{parent=projects/*/instances/*}/tables', + }, + ] + request, metadata = self._interceptor.pre_list_tables(request, metadata) + pb_request = bigtable_table_admin.ListTablesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
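+ # A status of 400 or above raises before any parsing happens, so callers
+ # never observe a partially populated `ListTablesResponse`.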
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = bigtable_table_admin.ListTablesResponse() + pb_resp = bigtable_table_admin.ListTablesResponse.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_tables(resp) + return resp + + class _ModifyColumnFamilies(BigtableTableAdminRestStub): + def __hash__(self): + return hash("ModifyColumnFamilies") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.ModifyColumnFamiliesRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> table.Table: + r"""Call the modify column families method over HTTP. + + Args: + request (~.bigtable_table_admin.ModifyColumnFamiliesRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table.Table: + A collection of user data indexed by + row, column, and timestamp. Each table + is served using the resources of its + parent cluster. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_modify_column_families(request, metadata) + pb_request = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
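+ # On success the response body is the updated `Table`; the service
+ # applies either all of the requested column family modifications or
+ # none of them.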
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Table() + pb_resp = table.Table.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_modify_column_families(resp) + return resp + + class _RestoreTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("RestoreTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.RestoreTableRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the restore table method over HTTP. + + Args: + request (~.bigtable_table_admin.RestoreTableRequest): + The request object. The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. + + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{parent=projects/*/instances/*}/tables:restore', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_restore_table(request, metadata) + pb_request = bigtable_table_admin.RestoreTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. 
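+ # `operations_pb2.Operation` is a plain protobuf message with no
+ # proto-plus wrapper, so the payload is parsed directly into `resp`; the
+ # client layer wraps it in an operation future that polls the restore's
+ # progress.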
+ if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_restore_table(resp) + return resp + + class _SetIamPolicy(BigtableTableAdminRestStub): + def __hash__(self): + return hash("SetIamPolicy") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: iam_policy_pb2.SetIamPolicyRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> policy_pb2.Policy: + r"""Call the set iam policy method over HTTP. + + Args: + request (~.iam_policy_pb2.SetIamPolicyRequest): + The request object. Request message for ``SetIamPolicy`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.policy_pb2.Policy: + An Identity and Access Management (IAM) policy, which + specifies access controls for Google Cloud resources. + + A ``Policy`` is a collection of ``bindings``. A + ``binding`` binds one or more ``members``, or + principals, to a single ``role``. Principals can be user + accounts, service accounts, Google groups, and domains + (such as G Suite). A ``role`` is a named list of + permissions; each ``role`` can be an IAM predefined role + or a user-created custom role. + + For some types of Google Cloud resources, a ``binding`` + can also specify a ``condition``, which is a logical + expression that allows access to a resource only if the + expression evaluates to ``true``. A condition can add + constraints based on attributes of the request, the + resource, or both. To learn which resources support + conditions in their IAM policies, see the `IAM + documentation `__. + + **JSON example:** + + :: + + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } + + **YAML example:** + + :: + + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 + + For a description of IAM and its features, see the `IAM + documentation `__. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy', + 'body': '*', + }, +{ + 'method': 'post', + 'uri': '/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = policy_pb2.Policy() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + return resp + + class _SnapshotTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("SnapshotTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.SnapshotTableRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the snapshot table method over HTTP. + + Args: + request (~.bigtable_table_admin.SnapshotTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to + most Cloud Bigtable customers. This feature might be + changed in backward-incompatible ways and is not + recommended for production use. It is not subject to any + SLA or deprecation policy. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{name=projects/*/instances/*/tables/*}:snapshot', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_snapshot_table(request, metadata) + pb_request = bigtable_table_admin.SnapshotTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_snapshot_table(resp) + return resp + + class _TestIamPermissions(BigtableTableAdminRestStub): + def __hash__(self): + return hash("TestIamPermissions") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: iam_policy_pb2.TestIamPermissionsRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> iam_policy_pb2.TestIamPermissionsResponse: + r"""Call the test iam permissions method over HTTP. + + Args: + request (~.iam_policy_pb2.TestIamPermissionsRequest): + The request object. Request message for ``TestIamPermissions`` method. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.iam_policy_pb2.TestIamPermissionsResponse: + Response message for ``TestIamPermissions`` method. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions', + 'body': '*', + }, +{ + 'method': 'post', + 'uri': '/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_test_iam_permissions(request, metadata) + pb_request = request + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = iam_policy_pb2.TestIamPermissionsResponse() + pb_resp = resp + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_test_iam_permissions(resp) + return resp + + class _UndeleteTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("UndeleteTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.UndeleteTableRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the undelete table method over HTTP. + + Args: + request (~.bigtable_table_admin.UndeleteTableRequest): + The request object. Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'post', + 'uri': '/v2/{name=projects/*/instances/*/tables/*}:undelete', + 'body': '*', + }, + ] + request, metadata = self._interceptor.pre_undelete_table(request, metadata) + pb_request = bigtable_table_admin.UndeleteTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_undelete_table(resp) + return resp + + class _UpdateBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("UpdateBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.UpdateBackupRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> table.Backup: + r"""Call the update backup method over HTTP. + + Args: + request (~.bigtable_table_admin.UpdateBackupRequest): + The request object. The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.table.Backup: + A backup of a Cloud Bigtable table. 
+ """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}', + 'body': 'backup', + }, + ] + request, metadata = self._interceptor.pre_update_backup(request, metadata) + pb_request = bigtable_table_admin.UpdateBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = table.Backup() + pb_resp = table.Backup.pb(resp) + + json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_backup(resp) + return resp + + class _UpdateTable(BigtableTableAdminRestStub): + def __hash__(self): + return hash("UpdateTable") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { + "updateMask" : {}, } + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} + + def __call__(self, + request: bigtable_table_admin.UpdateTableRequest, *, + retry: OptionalRetry=gapic_v1.method.DEFAULT, + timeout: Optional[float]=None, + metadata: Sequence[Tuple[str, str]]=(), + ) -> operations_pb2.Operation: + r"""Call the update table method over HTTP. + + Args: + request (~.bigtable_table_admin.UpdateTableRequest): + The request object. The request for + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [{ + 'method': 'patch', + 'uri': '/v2/{table.name=projects/*/instances/*/tables/*}', + 'body': 'table', + }, + ] + request, metadata = self._interceptor.pre_update_table(request, metadata) + pb_request = bigtable_table_admin.UpdateTableRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request['body'], + including_default_value_fields=False, + use_integers_for_enums=True + ) + uri = transcoded_request['uri'] + method = transcoded_request['method'] + + # Jsonify the query params + query_params = json.loads(json_format.MessageToJson( + transcoded_request['query_params'], + including_default_value_fields=False, + use_integers_for_enums=True, + )) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers['Content-Type'] = 'application/json' + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_table(resp) + return resp + + @property + def check_consistency(self) -> Callable[ + [bigtable_table_admin.CheckConsistencyRequest], + bigtable_table_admin.CheckConsistencyResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CheckConsistency(self._session, self._host, self._interceptor) # type: ignore + + @property + def copy_backup(self) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_backup(self) -> Callable[ + [bigtable_table_admin.CreateBackupRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateBackup(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_table(self) -> Callable[ + [bigtable_table_admin.CreateTableRequest], + gba_table.Table]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._CreateTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def create_table_from_snapshot(self) -> Callable[ + [bigtable_table_admin.CreateTableFromSnapshotRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CreateTableFromSnapshot(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_backup(self) -> Callable[ + [bigtable_table_admin.DeleteBackupRequest], + empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteBackup(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_snapshot(self) -> Callable[ + [bigtable_table_admin.DeleteSnapshotRequest], + empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteSnapshot(self._session, self._host, self._interceptor) # type: ignore + + @property + def delete_table(self) -> Callable[ + [bigtable_table_admin.DeleteTableRequest], + empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DeleteTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def drop_row_range(self) -> Callable[ + [bigtable_table_admin.DropRowRangeRequest], + empty_pb2.Empty]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._DropRowRange(self._session, self._host, self._interceptor) # type: ignore + + @property + def generate_consistency_token(self) -> Callable[ + [bigtable_table_admin.GenerateConsistencyTokenRequest], + bigtable_table_admin.GenerateConsistencyTokenResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GenerateConsistencyToken(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_backup(self) -> Callable[ + [bigtable_table_admin.GetBackupRequest], + table.Backup]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetBackup(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_iam_policy(self) -> Callable[ + [iam_policy_pb2.GetIamPolicyRequest], + policy_pb2.Policy]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_snapshot(self) -> Callable[ + [bigtable_table_admin.GetSnapshotRequest], + table.Snapshot]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._GetSnapshot(self._session, self._host, self._interceptor) # type: ignore + + @property + def get_table(self) -> Callable[ + [bigtable_table_admin.GetTableRequest], + table.Table]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._GetTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_backups(self) -> Callable[ + [bigtable_table_admin.ListBackupsRequest], + bigtable_table_admin.ListBackupsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListBackups(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_snapshots(self) -> Callable[ + [bigtable_table_admin.ListSnapshotsRequest], + bigtable_table_admin.ListSnapshotsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListSnapshots(self._session, self._host, self._interceptor) # type: ignore + + @property + def list_tables(self) -> Callable[ + [bigtable_table_admin.ListTablesRequest], + bigtable_table_admin.ListTablesResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ListTables(self._session, self._host, self._interceptor) # type: ignore + + @property + def modify_column_families(self) -> Callable[ + [bigtable_table_admin.ModifyColumnFamiliesRequest], + table.Table]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._ModifyColumnFamilies(self._session, self._host, self._interceptor) # type: ignore + + @property + def restore_table(self) -> Callable[ + [bigtable_table_admin.RestoreTableRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._RestoreTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def set_iam_policy(self) -> Callable[ + [iam_policy_pb2.SetIamPolicyRequest], + policy_pb2.Policy]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore + + @property + def snapshot_table(self) -> Callable[ + [bigtable_table_admin.SnapshotTableRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._SnapshotTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def test_iam_permissions(self) -> Callable[ + [iam_policy_pb2.TestIamPermissionsRequest], + iam_policy_pb2.TestIamPermissionsResponse]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore + + @property + def undelete_table(self) -> Callable[ + [bigtable_table_admin.UndeleteTableRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._UndeleteTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_backup(self) -> Callable[ + [bigtable_table_admin.UpdateBackupRequest], + table.Backup]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateBackup(self._session, self._host, self._interceptor) # type: ignore + + @property + def update_table(self) -> Callable[ + [bigtable_table_admin.UpdateTableRequest], + operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. + # In C++ this would require a dynamic_cast + return self._UpdateTable(self._session, self._host, self._interceptor) # type: ignore + + @property + def kind(self) -> str: + return "rest" + + def close(self): + self._session.close() + + +__all__=( + 'BigtableTableAdminRestTransport', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/__init__.py new file mode 100644 index 000000000..2ed192c75 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/__init__.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
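+#
+# This module re-exports the request, response, and resource messages of
+# bigtable_admin_v2 so that all of them can be imported from the single
+# `google.cloud.bigtable_admin_v2.types` namespace.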
+# +from .bigtable_instance_admin import ( + CreateAppProfileRequest, + CreateClusterMetadata, + CreateClusterRequest, + CreateInstanceMetadata, + CreateInstanceRequest, + DeleteAppProfileRequest, + DeleteClusterRequest, + DeleteInstanceRequest, + GetAppProfileRequest, + GetClusterRequest, + GetInstanceRequest, + ListAppProfilesRequest, + ListAppProfilesResponse, + ListClustersRequest, + ListClustersResponse, + ListHotTabletsRequest, + ListHotTabletsResponse, + ListInstancesRequest, + ListInstancesResponse, + PartialUpdateClusterMetadata, + PartialUpdateClusterRequest, + PartialUpdateInstanceRequest, + UpdateAppProfileMetadata, + UpdateAppProfileRequest, + UpdateClusterMetadata, + UpdateInstanceMetadata, +) +from .bigtable_table_admin import ( + CheckConsistencyRequest, + CheckConsistencyResponse, + CopyBackupMetadata, + CopyBackupRequest, + CreateBackupMetadata, + CreateBackupRequest, + CreateTableFromSnapshotMetadata, + CreateTableFromSnapshotRequest, + CreateTableRequest, + DeleteBackupRequest, + DeleteSnapshotRequest, + DeleteTableRequest, + DropRowRangeRequest, + GenerateConsistencyTokenRequest, + GenerateConsistencyTokenResponse, + GetBackupRequest, + GetSnapshotRequest, + GetTableRequest, + ListBackupsRequest, + ListBackupsResponse, + ListSnapshotsRequest, + ListSnapshotsResponse, + ListTablesRequest, + ListTablesResponse, + ModifyColumnFamiliesRequest, + OptimizeRestoredTableMetadata, + RestoreTableMetadata, + RestoreTableRequest, + SnapshotTableMetadata, + SnapshotTableRequest, + UndeleteTableMetadata, + UndeleteTableRequest, + UpdateBackupRequest, + UpdateTableMetadata, + UpdateTableRequest, +) +from .common import ( + OperationProgress, + StorageType, +) +from .instance import ( + AppProfile, + AutoscalingLimits, + AutoscalingTargets, + Cluster, + HotTablet, + Instance, +) +from .table import ( + Backup, + BackupInfo, + ChangeStreamConfig, + ColumnFamily, + EncryptionInfo, + GcRule, + RestoreInfo, + Snapshot, + Table, + RestoreSourceType, +) + +__all__ = ( + 'CreateAppProfileRequest', + 'CreateClusterMetadata', + 'CreateClusterRequest', + 'CreateInstanceMetadata', + 'CreateInstanceRequest', + 'DeleteAppProfileRequest', + 'DeleteClusterRequest', + 'DeleteInstanceRequest', + 'GetAppProfileRequest', + 'GetClusterRequest', + 'GetInstanceRequest', + 'ListAppProfilesRequest', + 'ListAppProfilesResponse', + 'ListClustersRequest', + 'ListClustersResponse', + 'ListHotTabletsRequest', + 'ListHotTabletsResponse', + 'ListInstancesRequest', + 'ListInstancesResponse', + 'PartialUpdateClusterMetadata', + 'PartialUpdateClusterRequest', + 'PartialUpdateInstanceRequest', + 'UpdateAppProfileMetadata', + 'UpdateAppProfileRequest', + 'UpdateClusterMetadata', + 'UpdateInstanceMetadata', + 'CheckConsistencyRequest', + 'CheckConsistencyResponse', + 'CopyBackupMetadata', + 'CopyBackupRequest', + 'CreateBackupMetadata', + 'CreateBackupRequest', + 'CreateTableFromSnapshotMetadata', + 'CreateTableFromSnapshotRequest', + 'CreateTableRequest', + 'DeleteBackupRequest', + 'DeleteSnapshotRequest', + 'DeleteTableRequest', + 'DropRowRangeRequest', + 'GenerateConsistencyTokenRequest', + 'GenerateConsistencyTokenResponse', + 'GetBackupRequest', + 'GetSnapshotRequest', + 'GetTableRequest', + 'ListBackupsRequest', + 'ListBackupsResponse', + 'ListSnapshotsRequest', + 'ListSnapshotsResponse', + 'ListTablesRequest', + 'ListTablesResponse', + 'ModifyColumnFamiliesRequest', + 'OptimizeRestoredTableMetadata', + 'RestoreTableMetadata', + 'RestoreTableRequest', + 'SnapshotTableMetadata', + 'SnapshotTableRequest', + 
'UndeleteTableMetadata', + 'UndeleteTableRequest', + 'UpdateBackupRequest', + 'UpdateTableMetadata', + 'UpdateTableRequest', + 'OperationProgress', + 'StorageType', + 'AppProfile', + 'AutoscalingLimits', + 'AutoscalingTargets', + 'Cluster', + 'HotTablet', + 'Instance', + 'Backup', + 'BackupInfo', + 'ChangeStreamConfig', + 'ColumnFamily', + 'EncryptionInfo', + 'GcRule', + 'RestoreInfo', + 'Snapshot', + 'Table', + 'RestoreSourceType', +) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py new file mode 100644 index 000000000..0acfa10bb --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -0,0 +1,891 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.bigtable.admin.v2', + manifest={ + 'CreateInstanceRequest', + 'GetInstanceRequest', + 'ListInstancesRequest', + 'ListInstancesResponse', + 'PartialUpdateInstanceRequest', + 'DeleteInstanceRequest', + 'CreateClusterRequest', + 'GetClusterRequest', + 'ListClustersRequest', + 'ListClustersResponse', + 'DeleteClusterRequest', + 'CreateInstanceMetadata', + 'UpdateInstanceMetadata', + 'CreateClusterMetadata', + 'UpdateClusterMetadata', + 'PartialUpdateClusterMetadata', + 'PartialUpdateClusterRequest', + 'CreateAppProfileRequest', + 'GetAppProfileRequest', + 'ListAppProfilesRequest', + 'ListAppProfilesResponse', + 'UpdateAppProfileRequest', + 'DeleteAppProfileRequest', + 'UpdateAppProfileMetadata', + 'ListHotTabletsRequest', + 'ListHotTabletsResponse', + }, +) + + +class CreateInstanceRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateInstance. + + Attributes: + parent (str): + Required. The unique name of the project in which to create + the new instance. Values are of the form + ``projects/{project}``. + instance_id (str): + Required. The ID to be used when referring to the new + instance within its project, e.g., just ``myinstance`` + rather than ``projects/myproject/instances/myinstance``. + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The instance to create. Fields marked + ``OutputOnly`` must be left blank. + clusters (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): + Required. The clusters to be created within the instance, + mapped by desired cluster ID, e.g., just ``mycluster`` + rather than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + Fields marked ``OutputOnly`` must be left blank. 
Currently, + at most four clusters can be specified. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + instance_id: str = proto.Field( + proto.STRING, + number=2, + ) + instance: gba_instance.Instance = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.Instance, + ) + clusters: MutableMapping[str, gba_instance.Cluster] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=4, + message=gba_instance.Cluster, + ) + + +class GetInstanceRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetInstance. + + Attributes: + name (str): + Required. The unique name of the requested instance. Values + are of the form ``projects/{project}/instances/{instance}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListInstancesRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListInstances. + + Attributes: + parent (str): + Required. The unique name of the project for which a list of + instances is requested. Values are of the form + ``projects/{project}``. + page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ListInstancesResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListInstances. + + Attributes: + instances (MutableSequence[google.cloud.bigtable_admin_v2.types.Instance]): + The list of requested instances. + failed_locations (MutableSequence[str]): + Locations from which Instance information could not be + retrieved, due to an outage or some other transient + condition. Instances whose Clusters are all in one of the + failed locations may be missing from ``instances``, and + Instances with at least one Cluster in a failed location may + only have partial information returned. Values are of the + form ``projects//locations/`` + next_page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + @property + def raw_page(self): + return self + + instances: MutableSequence[gba_instance.Instance] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_instance.Instance, + ) + failed_locations: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class PartialUpdateInstanceRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.PartialUpdateInstance. + + Attributes: + instance (google.cloud.bigtable_admin_v2.types.Instance): + Required. The Instance which will (partially) + replace the current value. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of Instance fields which + should be replaced. Must be explicitly set. + """ + + instance: gba_instance.Instance = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.Instance, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class DeleteInstanceRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteInstance. + + Attributes: + name (str): + Required. The unique name of the instance to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateClusterRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateCluster. + + Attributes: + parent (str): + Required. 
The unique name of the instance in which to create + the new cluster. Values are of the form + ``projects/{project}/instances/{instance}``. + cluster_id (str): + Required. The ID to be used when referring to the new + cluster within its instance, e.g., just ``mycluster`` rather + than + ``projects/myproject/instances/myinstance/clusters/mycluster``. + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The cluster to be created. Fields marked + ``OutputOnly`` must be left blank. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + cluster_id: str = proto.Field( + proto.STRING, + number=2, + ) + cluster: gba_instance.Cluster = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.Cluster, + ) + + +class GetClusterRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetCluster. + + Attributes: + name (str): + Required. The unique name of the requested cluster. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListClustersRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListClusters. + + Attributes: + parent (str): + Required. The unique name of the instance for which a list + of clusters is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list Clusters for all Instances in a + project, e.g., ``projects/myproject/instances/-``. + page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ListClustersResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListClusters. + + Attributes: + clusters (MutableSequence[google.cloud.bigtable_admin_v2.types.Cluster]): + The list of requested clusters. + failed_locations (MutableSequence[str]): + Locations from which Cluster information could not be + retrieved, due to an outage or some other transient + condition. Clusters from these locations may be missing from + ``clusters``, or may only have partial information returned. + Values are of the form + ``projects//locations/`` + next_page_token (str): + DEPRECATED: This field is unused and ignored. + """ + + @property + def raw_page(self): + return self + + clusters: MutableSequence[gba_instance.Cluster] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_instance.Cluster, + ) + failed_locations: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=2, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class DeleteClusterRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteCluster. + + Attributes: + name (str): + Required. The unique name of the cluster to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class CreateInstanceMetadata(proto.Message): + r"""The metadata for the Operation returned by CreateInstance. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): + The request that prompted the initiation of + this CreateInstance operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. 
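A minimal sketch of populating the CreateInstanceRequest described above; the resource names and node count are placeholders, and the clusters map is keyed by the bare cluster ID as documented:

    from google.cloud.bigtable_admin_v2 import types

    request = types.CreateInstanceRequest(
        parent="projects/my-project",
        instance_id="myinstance",
        instance=types.Instance(display_name="My instance"),
        clusters={
            # Keyed by cluster ID, not the full resource name.
            "mycluster": types.Cluster(
                location="projects/my-project/locations/us-central1-b",
                serve_nodes=3,
            ),
        },
    )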
+ finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request: 'CreateInstanceRequest' = proto.Field( + proto.MESSAGE, + number=1, + message='CreateInstanceRequest', + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class UpdateInstanceMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateInstance. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): + The request that prompted the initiation of + this UpdateInstance operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request: 'PartialUpdateInstanceRequest' = proto.Field( + proto.MESSAGE, + number=1, + message='PartialUpdateInstanceRequest', + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class CreateClusterMetadata(proto.Message): + r"""The metadata for the Operation returned by CreateCluster. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): + The request that prompted the initiation of + this CreateCluster operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + tables (MutableMapping[str, google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress]): + Keys: the full ``name`` of each table that existed in the + instance when CreateCluster was first called, i.e. + ``projects//instances//tables/
``. + Any table added to the instance by a later API call will be + created in the new cluster by that API call, not this one. + + Values: information on how much of a table's data has been + copied to the newly-created cluster so far. + """ + + class TableProgress(proto.Message): + r"""Progress info for copying a table's data to the new cluster. + + Attributes: + estimated_size_bytes (int): + Estimate of the size of the table to be + copied. + estimated_copied_bytes (int): + Estimate of the number of bytes copied so far for this + table. This will eventually reach 'estimated_size_bytes' + unless the table copy is CANCELLED. + state (google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress.State): + + """ + class State(proto.Enum): + r""" + + Values: + STATE_UNSPECIFIED (0): + No description available. + PENDING (1): + The table has not yet begun copying to the + new cluster. + COPYING (2): + The table is actively being copied to the new + cluster. + COMPLETED (3): + The table has been fully copied to the new + cluster. + CANCELLED (4): + The table was deleted before it finished + copying to the new cluster. Note that tables + deleted after completion will stay marked as + COMPLETED, not CANCELLED. + """ + STATE_UNSPECIFIED = 0 + PENDING = 1 + COPYING = 2 + COMPLETED = 3 + CANCELLED = 4 + + estimated_size_bytes: int = proto.Field( + proto.INT64, + number=2, + ) + estimated_copied_bytes: int = proto.Field( + proto.INT64, + number=3, + ) + state: 'CreateClusterMetadata.TableProgress.State' = proto.Field( + proto.ENUM, + number=4, + enum='CreateClusterMetadata.TableProgress.State', + ) + + original_request: 'CreateClusterRequest' = proto.Field( + proto.MESSAGE, + number=1, + message='CreateClusterRequest', + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + tables: MutableMapping[str, TableProgress] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=4, + message=TableProgress, + ) + + +class UpdateClusterMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateCluster. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.Cluster): + The request that prompted the initiation of + this UpdateCluster operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request: gba_instance.Cluster = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.Cluster, + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class PartialUpdateClusterMetadata(proto.Message): + r"""The metadata for the Operation returned by + PartialUpdateCluster. + + Attributes: + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. 
+ original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest): + The original request for + PartialUpdateCluster. + """ + + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=1, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + original_request: 'PartialUpdateClusterRequest' = proto.Field( + proto.MESSAGE, + number=3, + message='PartialUpdateClusterRequest', + ) + + +class PartialUpdateClusterRequest(proto.Message): + r"""Request message for + BigtableInstanceAdmin.PartialUpdateCluster. + + Attributes: + cluster (google.cloud.bigtable_admin_v2.types.Cluster): + Required. The Cluster which contains the partial updates to + be applied, subject to the update_mask. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of Cluster fields which + should be replaced. + """ + + cluster: gba_instance.Cluster = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.Cluster, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class CreateAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.CreateAppProfile. + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the new app profile. Values are of the form + ``projects/{project}/instances/{instance}``. + app_profile_id (str): + Required. The ID to be used when referring to the new app + profile within its instance, e.g., just ``myprofile`` rather + than + ``projects/myproject/instances/myinstance/appProfiles/myprofile``. + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile to be created. Fields marked + ``OutputOnly`` will be ignored. + ignore_warnings (bool): + If true, ignore safety checks when creating + the app profile. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + app_profile_id: str = proto.Field( + proto.STRING, + number=2, + ) + app_profile: gba_instance.AppProfile = proto.Field( + proto.MESSAGE, + number=3, + message=gba_instance.AppProfile, + ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=4, + ) + + +class GetAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.GetAppProfile. + + Attributes: + name (str): + Required. The unique name of the requested app profile. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListAppProfilesRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListAppProfiles. + + Attributes: + parent (str): + Required. The unique name of the instance for which a list + of app profiles is requested. Values are of the form + ``projects/{project}/instances/{instance}``. Use + ``{instance} = '-'`` to list AppProfiles for all Instances + in a project, e.g., ``projects/myproject/instances/-``. + page_size (int): + Maximum number of results per page. + + A page_size of zero lets the server choose the number of + items to return. A page_size which is strictly positive will + return at most that many items. A negative page_size will + cause an error. + + Following the first request, subsequent paginated calls are + not required to pass a page_size. 
If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=3, + ) + page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class ListAppProfilesResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListAppProfiles. + + Attributes: + app_profiles (MutableSequence[google.cloud.bigtable_admin_v2.types.AppProfile]): + The list of requested app profiles. + next_page_token (str): + Set if not all app profiles could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + failed_locations (MutableSequence[str]): + Locations from which AppProfile information could not be + retrieved, due to an outage or some other transient + condition. AppProfiles from these locations may be missing + from ``app_profiles``. Values are of the form + ``projects//locations/`` + """ + + @property + def raw_page(self): + return self + + app_profiles: MutableSequence[gba_instance.AppProfile] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_instance.AppProfile, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + failed_locations: MutableSequence[str] = proto.RepeatedField( + proto.STRING, + number=3, + ) + + +class UpdateAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.UpdateAppProfile. + + Attributes: + app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): + Required. The app profile which will + (partially) replace the current value. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The subset of app profile fields + which should be replaced. If unset, all fields + will be replaced. + ignore_warnings (bool): + If true, ignore safety checks when updating + the app profile. + """ + + app_profile: gba_instance.AppProfile = proto.Field( + proto.MESSAGE, + number=1, + message=gba_instance.AppProfile, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +class DeleteAppProfileRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.DeleteAppProfile. + + Attributes: + name (str): + Required. The unique name of the app profile to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. + ignore_warnings (bool): + Required. If true, ignore safety checks when + deleting the app profile. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + ignore_warnings: bool = proto.Field( + proto.BOOL, + number=2, + ) + + +class UpdateAppProfileMetadata(proto.Message): + r"""The metadata for the Operation returned by UpdateAppProfile. + """ + + +class ListHotTabletsRequest(proto.Message): + r"""Request message for BigtableInstanceAdmin.ListHotTablets. + + Attributes: + parent (str): + Required. The cluster name to list hot tablets. Value is in + the following form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The start time to list hot tablets. The hot + tablets in the response will have start times + between the requested start time and end time. 
+ Start time defaults to Now if it is unset, and + end time defaults to Now - 24 hours if it is + unset. The start time should be less than the + end time, and the maximum allowed time range + between start time and end time is 48 hours. + Start time and end time should have values + between Now and Now - 14 days. + end_time (google.protobuf.timestamp_pb2.Timestamp): + The end time to list hot tablets. + page_size (int): + Maximum number of results per page. + + A page_size that is empty or zero lets the server choose the + number of items to return. A page_size which is strictly + positive will return at most that many items. A negative + page_size will cause an error. + + Following the first request, subsequent paginated calls do + not need a page_size field. If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=5, + ) + + +class ListHotTabletsResponse(proto.Message): + r"""Response message for BigtableInstanceAdmin.ListHotTablets. + + Attributes: + hot_tablets (MutableSequence[google.cloud.bigtable_admin_v2.types.HotTablet]): + List of hot tablets in the tables of the + requested cluster that fall within the requested + time range. Hot tablets are ordered by node cpu + usage percent. If there are multiple hot tablets + that correspond to the same tablet within a + 15-minute interval, only the hot tablet with the + highest node cpu usage will be included in the + response. + next_page_token (str): + Set if not all hot tablets could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + """ + + @property + def raw_page(self): + return self + + hot_tablets: MutableSequence[gba_instance.HotTablet] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_instance.HotTablet, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py new file mode 100644 index 000000000..11ee1854a --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -0,0 +1,1364 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
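One note on the paginated List* messages above before the table-admin types begin: the generated clients wrap page_token/next_page_token handling in pager objects, so callers rarely touch either field directly. A sketch with placeholder names:

    from google.cloud.bigtable_admin_v2 import BigtableInstanceAdminClient

    client = BigtableInstanceAdminClient()
    # The returned pager follows next_page_token across pages transparently.
    for hot_tablet in client.list_hot_tablets(
        parent="projects/my-project/instances/myinstance/clusters/mycluster"
    ):
        print(hot_tablet.name)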
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import table as gba_table +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.bigtable.admin.v2', + manifest={ + 'RestoreTableRequest', + 'RestoreTableMetadata', + 'OptimizeRestoredTableMetadata', + 'CreateTableRequest', + 'CreateTableFromSnapshotRequest', + 'DropRowRangeRequest', + 'ListTablesRequest', + 'ListTablesResponse', + 'GetTableRequest', + 'UpdateTableRequest', + 'UpdateTableMetadata', + 'DeleteTableRequest', + 'UndeleteTableRequest', + 'UndeleteTableMetadata', + 'ModifyColumnFamiliesRequest', + 'GenerateConsistencyTokenRequest', + 'GenerateConsistencyTokenResponse', + 'CheckConsistencyRequest', + 'CheckConsistencyResponse', + 'SnapshotTableRequest', + 'GetSnapshotRequest', + 'ListSnapshotsRequest', + 'ListSnapshotsResponse', + 'DeleteSnapshotRequest', + 'SnapshotTableMetadata', + 'CreateTableFromSnapshotMetadata', + 'CreateBackupRequest', + 'CreateBackupMetadata', + 'UpdateBackupRequest', + 'GetBackupRequest', + 'DeleteBackupRequest', + 'ListBackupsRequest', + 'ListBackupsResponse', + 'CopyBackupRequest', + 'CopyBackupMetadata', + }, +) + + +class RestoreTableRequest(proto.Message): + r"""The request for + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + parent (str): + Required. The name of the instance in which to create the + restored table. Values are of the form + ``projects//instances/``. + table_id (str): + Required. The id of the table to create and restore to. This + table must not already exist. The ``table_id`` appended to + ``parent`` forms the full table name of the form + ``projects//instances//tables/``. + backup (str): + Name of the backup from which to restore. Values are of the + form + ``projects//instances//clusters//backups/``. + + This field is a member of `oneof`_ ``source``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + table_id: str = proto.Field( + proto.STRING, + number=2, + ) + backup: str = proto.Field( + proto.STRING, + number=3, + oneof='source', + ) + + +class RestoreTableMetadata(proto.Message): + r"""Metadata type for the long-running operation returned by + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Name of the table being created and restored + to. + source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): + The type of the restore source. + backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): + + This field is a member of `oneof`_ ``source_info``. + optimize_table_operation_name (str): + If set, the name of the long-running operation that will + be used to track the post-restore optimization process to + optimize the performance of the restored table. The metadata + type of the long-running operation is + [OptimizeRestoredTableMetadata][]. The response type is + [Empty][google.protobuf.Empty].
This long-running operation + may be automatically created by the system if applicable + after the RestoreTable long-running operation completes + successfully. This operation may not be created if the table + is already optimized or the restore was not successful. + progress (google.cloud.bigtable_admin_v2.types.OperationProgress): + The progress of the + [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] + operation. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + source_type: gba_table.RestoreSourceType = proto.Field( + proto.ENUM, + number=2, + enum=gba_table.RestoreSourceType, + ) + backup_info: gba_table.BackupInfo = proto.Field( + proto.MESSAGE, + number=3, + oneof='source_info', + message=gba_table.BackupInfo, + ) + optimize_table_operation_name: str = proto.Field( + proto.STRING, + number=4, + ) + progress: common.OperationProgress = proto.Field( + proto.MESSAGE, + number=5, + message=common.OperationProgress, + ) + + +class OptimizeRestoredTableMetadata(proto.Message): + r"""Metadata type for the long-running operation used to track + the progress of optimizations performed on a newly restored + table. This long-running operation is automatically created by + the system after the successful completion of a table restore, + and cannot be cancelled. + + Attributes: + name (str): + Name of the restored table being optimized. + progress (google.cloud.bigtable_admin_v2.types.OperationProgress): + The progress of the post-restore + optimizations. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + progress: common.OperationProgress = proto.Field( + proto.MESSAGE, + number=2, + message=common.OperationProgress, + ) + + +class CreateTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. Maximum 50 characters. + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The Table to create. + initial_splits (MutableSequence[google.cloud.bigtable_admin_v2.types.CreateTableRequest.Split]): + The optional list of row keys that will be used to initially + split the table into several tablets (tablets are similar to + HBase regions). Given two split keys, ``s1`` and ``s2``, + three tablets will be created, spanning the key ranges: + ``[, s1), [s1, s2), [s2, )``. + + Example: + + - Row keys := + ``["a", "apple", "custom", "customer_1", "customer_2",`` + ``"other", "zz"]`` + - initial_split_keys := + ``["apple", "customer_1", "customer_2", "other"]`` + - Key assignment: + + - Tablet 1 ``[, apple) => {"a"}.`` + - Tablet 2 + ``[apple, customer_1) => {"apple", "custom"}.`` + - Tablet 3 + ``[customer_1, customer_2) => {"customer_1"}.`` + - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` + - Tablet 5 ``[other, ) => {"other", "zz"}.`` + """ + + class Split(proto.Message): + r"""An initial split point for a newly created table. + + Attributes: + key (bytes): + Row key to use as an initial tablet boundary. 
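A short sketch mirroring the key-assignment example above (all names are placeholders):

    from google.cloud.bigtable_admin_v2 import types

    request = types.CreateTableRequest(
        parent="projects/my-project/instances/myinstance",
        table_id="foobar",
        table=types.Table(),
        initial_splits=[
            types.CreateTableRequest.Split(key=key)
            for key in (b"apple", b"customer_1", b"customer_2", b"other")
        ],
    )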
+ """ + + key: bytes = proto.Field( + proto.BYTES, + number=1, + ) + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + table_id: str = proto.Field( + proto.STRING, + number=2, + ) + table: gba_table.Table = proto.Field( + proto.MESSAGE, + number=3, + message=gba_table.Table, + ) + initial_splits: MutableSequence[Split] = proto.RepeatedField( + proto.MESSAGE, + number=4, + message=Split, + ) + + +class CreateTableFromSnapshotRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + parent (str): + Required. The unique name of the instance in which to create + the table. Values are of the form + ``projects/{project}/instances/{instance}``. + table_id (str): + Required. The name by which the new table should be referred + to within the parent instance, e.g., ``foobar`` rather than + ``{parent}/tables/foobar``. + source_snapshot (str): + Required. The unique name of the snapshot from which to + restore the table. The snapshot and the table must be in the + same instance. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + table_id: str = proto.Field( + proto.STRING, + number=2, + ) + source_snapshot: str = proto.Field( + proto.STRING, + number=3, + ) + + +class DropRowRangeRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + Required. The unique name of the table on which to drop a + range of rows. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + row_key_prefix (bytes): + Delete all rows that start with this row key + prefix. Prefix cannot be zero length. + + This field is a member of `oneof`_ ``target``. + delete_all_data_from_table (bool): + Delete all rows in the table. Setting this to + false is a no-op. + + This field is a member of `oneof`_ ``target``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + row_key_prefix: bytes = proto.Field( + proto.BYTES, + number=2, + oneof='target', + ) + delete_all_data_from_table: bool = proto.Field( + proto.BOOL, + number=3, + oneof='target', + ) + + +class ListTablesRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Attributes: + parent (str): + Required. The unique name of the instance for which tables + should be listed. Values are of the form + ``projects/{project}/instances/{instance}``. + view (google.cloud.bigtable_admin_v2.types.Table.View): + The view to be applied to the returned tables' fields. 
+ NAME_ONLY view (default) and REPLICATION_VIEW are supported. + page_size (int): + Maximum number of results per page. + + A page_size of zero lets the server choose the number of + items to return. A page_size which is strictly positive will + return at most that many items. A negative page_size will + cause an error. + + Following the first request, subsequent paginated calls are + not required to pass a page_size. If a page_size is set in + subsequent calls, it must match the page_size given in the + first request. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + view: gba_table.Table.View = proto.Field( + proto.ENUM, + number=2, + enum=gba_table.Table.View, + ) + page_size: int = proto.Field( + proto.INT32, + number=4, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListTablesResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] + + Attributes: + tables (MutableSequence[google.cloud.bigtable_admin_v2.types.Table]): + The tables present in the requested instance. + next_page_token (str): + Set if not all tables could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + """ + + @property + def raw_page(self): + return self + + tables: MutableSequence[gba_table.Table] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_table.Table, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class GetTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] + + Attributes: + name (str): + Required. The unique name of the requested table. Values are + of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + view (google.cloud.bigtable_admin_v2.types.Table.View): + The view to be applied to the returned table's fields. + Defaults to ``SCHEMA_VIEW`` if unspecified. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + view: gba_table.Table.View = proto.Field( + proto.ENUM, + number=2, + enum=gba_table.Table.View, + ) + + +class UpdateTableRequest(proto.Message): + r"""The request for + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. + + Attributes: + table (google.cloud.bigtable_admin_v2.types.Table): + Required. The table to update. The table's ``name`` field is + used to identify the table to update. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. The list of fields to update. A mask specifying + which fields (e.g. ``change_stream_config``) in the + ``table`` field should be updated. This mask is relative to + the ``table`` field, not to the request message. The + wildcard (*) path is currently not supported. Currently + UpdateTable is only supported for the following fields: + + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it will + return an UNIMPLEMENTED error. 
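For instance, a sketch of updating only deletion protection under the mask rules just described (the table name is a placeholder):

    from google.protobuf import field_mask_pb2

    from google.cloud.bigtable_admin_v2 import types

    request = types.UpdateTableRequest(
        table=types.Table(
            name="projects/my-project/instances/myinstance/tables/my-table",
            deletion_protection=True,
        ),
        # The mask is relative to ``table``, not to the request message.
        update_mask=field_mask_pb2.FieldMask(paths=["deletion_protection"]),
    )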
+ """ + + table: gba_table.Table = proto.Field( + proto.MESSAGE, + number=1, + message=gba_table.Table, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class UpdateTableMetadata(proto.Message): + r"""Metadata type for the operation returned by + [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. + + Attributes: + name (str): + The name of the table being updated. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was canceled. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class DeleteTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] + + Attributes: + name (str): + Required. The unique name of the table to be deleted. Values + are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UndeleteTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] + + Attributes: + name (str): + Required. The unique name of the table to be restored. + Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class UndeleteTableMetadata(proto.Message): + r"""Metadata type for the operation returned by + [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable]. + + Attributes: + name (str): + The name of the table being restored. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was cancelled. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class ModifyColumnFamiliesRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] + + Attributes: + name (str): + Required. The unique name of the table whose families should + be modified. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): + Required. Modifications to be atomically + applied to the specified table's families. + Entries are applied in order, meaning that + earlier modifications can be masked by later + ones (in the case of repeated updates to the + same family, for example). 
+ """ + + class Modification(proto.Message): + r"""A create, update, or delete of a particular column family. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + id (str): + The ID of the column family to be modified. + create (google.cloud.bigtable_admin_v2.types.ColumnFamily): + Create a new column family with the specified + schema, or fail if one already exists with the + given ID. + + This field is a member of `oneof`_ ``mod``. + update (google.cloud.bigtable_admin_v2.types.ColumnFamily): + Update an existing column family to the + specified schema, or fail if no column family + exists with the given ID. + + This field is a member of `oneof`_ ``mod``. + drop (bool): + Drop (delete) the column family with the + given ID, or fail if no such family exists. + + This field is a member of `oneof`_ ``mod``. + """ + + id: str = proto.Field( + proto.STRING, + number=1, + ) + create: gba_table.ColumnFamily = proto.Field( + proto.MESSAGE, + number=2, + oneof='mod', + message=gba_table.ColumnFamily, + ) + update: gba_table.ColumnFamily = proto.Field( + proto.MESSAGE, + number=3, + oneof='mod', + message=gba_table.ColumnFamily, + ) + drop: bool = proto.Field( + proto.BOOL, + number=4, + oneof='mod', + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + modifications: MutableSequence[Modification] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message=Modification, + ) + + +class GenerateConsistencyTokenRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + Attributes: + name (str): + Required. The unique name of the Table for which to create a + consistency token. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class GenerateConsistencyTokenResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] + + Attributes: + consistency_token (str): + The generated consistency token. + """ + + consistency_token: str = proto.Field( + proto.STRING, + number=1, + ) + + +class CheckConsistencyRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + Attributes: + name (str): + Required. The unique name of the Table for which to check + replication consistency. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + consistency_token (str): + Required. The token created using + GenerateConsistencyToken for the Table. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + consistency_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class CheckConsistencyResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] + + Attributes: + consistent (bool): + True only if the token is consistent. 
A token + is consistent if replication has caught up with + the restrictions specified in the request. + """ + + consistent: bool = proto.Field( + proto.BOOL, + number=1, + ) + + +class SnapshotTableRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + name (str): + Required. The unique name of the table to have the snapshot + taken. Values are of the form + ``projects/{project}/instances/{instance}/tables/{table}``. + cluster (str): + Required. The name of the cluster in which the snapshot will + be created. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + snapshot_id (str): + Required. The ID by which the new snapshot should be + referred to within the parent cluster, e.g., ``mysnapshot`` + of the form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. + ttl (google.protobuf.duration_pb2.Duration): + The amount of time that the new snapshot can + stay active after it is created. Once 'ttl' + expires, the snapshot will get deleted. The + maximum amount of time a snapshot can stay + active is 7 days. If 'ttl' is not specified, the + default value of 24 hours will be used. + description (str): + Description of the snapshot. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + cluster: str = proto.Field( + proto.STRING, + number=2, + ) + snapshot_id: str = proto.Field( + proto.STRING, + number=3, + ) + ttl: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=4, + message=duration_pb2.Duration, + ) + description: str = proto.Field( + proto.STRING, + number=5, + ) + + +class GetSnapshotRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + name (str): + Required. The unique name of the requested snapshot. Values + are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListSnapshotsRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + parent (str): + Required. The unique name of the cluster for which snapshots + should be listed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+ Use ``{cluster} = '-'`` to list snapshots for all clusters + in an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + page_size (int): + The maximum number of snapshots to return per + page. CURRENTLY UNIMPLEMENTED AND IGNORED. + page_token (str): + The value of ``next_page_token`` returned by a previous + call. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + page_size: int = proto.Field( + proto.INT32, + number=2, + ) + page_token: str = proto.Field( + proto.STRING, + number=3, + ) + + +class ListSnapshotsResponse(proto.Message): + r"""Response message for + [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + snapshots (MutableSequence[google.cloud.bigtable_admin_v2.types.Snapshot]): + The snapshots present in the requested + cluster. + next_page_token (str): + Set if not all snapshots could be returned in a single + response. Pass this value to ``page_token`` in another + request to get the next page of results. + """ + + @property + def raw_page(self): + return self + + snapshots: MutableSequence[gba_table.Snapshot] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message=gba_table.Snapshot, + ) + next_page_token: str = proto.Field( + proto.STRING, + number=2, + ) + + +class DeleteSnapshotRequest(proto.Message): + r"""Request message for + [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + + Note: This is a private alpha release of Cloud Bigtable snapshots. + This feature is not currently available to most Cloud Bigtable + customers. This feature might be changed in backward-incompatible + ways and is not recommended for production use. It is not subject to + any SLA or deprecation policy. + + Attributes: + name (str): + Required. The unique name of the snapshot to be deleted. + Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class SnapshotTableMetadata(proto.Message): + r"""The metadata for the Operation returned by SnapshotTable. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to most Cloud + Bigtable customers. This feature might be changed in + backward-incompatible ways and is not recommended for production + use. It is not subject to any SLA or deprecation policy. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest): + The request that prompted the initiation of + this SnapshotTable operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. 
+ """ + + original_request: 'SnapshotTableRequest' = proto.Field( + proto.MESSAGE, + number=1, + message='SnapshotTableRequest', + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class CreateTableFromSnapshotMetadata(proto.Message): + r"""The metadata for the Operation returned by + CreateTableFromSnapshot. + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to most Cloud + Bigtable customers. This feature might be changed in + backward-incompatible ways and is not recommended for production + use. It is not subject to any SLA or deprecation policy. + + Attributes: + original_request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest): + The request that prompted the initiation of + this CreateTableFromSnapshot operation. + request_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the original request was + received. + finish_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which the operation failed or was + completed successfully. + """ + + original_request: 'CreateTableFromSnapshotRequest' = proto.Field( + proto.MESSAGE, + number=1, + message='CreateTableFromSnapshotRequest', + ) + request_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + finish_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +class CreateBackupRequest(proto.Message): + r"""The request for + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + + Attributes: + parent (str): + Required. This must be one of the clusters in the instance + in which this table is located. The backup will be stored in + this cluster. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + backup_id (str): + Required. The id of the backup to be created. The + ``backup_id`` along with the parent ``parent`` are combined + as {parent}/backups/{backup_id} to create the full backup + name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in length + and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. The backup to create. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + backup_id: str = proto.Field( + proto.STRING, + number=2, + ) + backup: gba_table.Backup = proto.Field( + proto.MESSAGE, + number=3, + message=gba_table.Backup, + ) + + +class CreateBackupMetadata(proto.Message): + r"""Metadata type for the operation returned by + [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. + + Attributes: + name (str): + The name of the backup being created. + source_table (str): + The name of the table the backup is created + from. + start_time (google.protobuf.timestamp_pb2.Timestamp): + The time at which this operation started. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + finished or was cancelled. 
+ """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + source_table: str = proto.Field( + proto.STRING, + number=2, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class UpdateBackupRequest(proto.Message): + r"""The request for + [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. + + Attributes: + backup (google.cloud.bigtable_admin_v2.types.Backup): + Required. The backup to update. ``backup.name``, and the + fields to be updated as specified by ``update_mask`` are + required. Other fields are ignored. Update is only supported + for the following fields: + + - ``backup.expire_time``. + update_mask (google.protobuf.field_mask_pb2.FieldMask): + Required. A mask specifying which fields (e.g. + ``expire_time``) in the Backup resource should be updated. + This mask is relative to the Backup resource, not to the + request message. The field mask must always be specified; + this prevents any future fields from being erased + accidentally by clients that do not know about them. + """ + + backup: gba_table.Backup = proto.Field( + proto.MESSAGE, + number=1, + message=gba_table.Backup, + ) + update_mask: field_mask_pb2.FieldMask = proto.Field( + proto.MESSAGE, + number=2, + message=field_mask_pb2.FieldMask, + ) + + +class GetBackupRequest(proto.Message): + r"""The request for + [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. + + Attributes: + name (str): + Required. Name of the backup. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class DeleteBackupRequest(proto.Message): + r"""The request for + [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. + + Attributes: + name (str): + Required. Name of the backup to delete. Values are of the + form + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + + +class ListBackupsRequest(proto.Message): + r"""The request for + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + + Attributes: + parent (str): + Required. The cluster to list backups from. Values are of + the form + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + Use ``{cluster} = '-'`` to list backups for all clusters in + an instance, e.g., + ``projects/{project}/instances/{instance}/clusters/-``. + filter (str): + A filter expression that filters backups listed in the + response. The expression must specify the field name, a + comparison operator, and the value that you want to use for + filtering. The value must be a string, a number, or a + boolean. The comparison operator must be <, >, <=, >=, !=, + =, or :. Colon ':' represents a HAS operator which is + roughly synonymous with equality. Filter rules are case + insensitive. + + The fields eligible for filtering are: + + - ``name`` + - ``source_table`` + - ``state`` + - ``start_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``end_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``expire_time`` (and values are of the format + YYYY-MM-DDTHH:MM:SSZ) + - ``size_bytes`` + + To filter on multiple expressions, provide each separate + expression within parentheses. 
By default, each expression
+ is an AND expression. However, you can include AND, OR, and
+ NOT expressions explicitly.
+
+ Some examples of using filters are:
+
+ - ``name:"exact"`` --> The backup's name is the string
+ "exact".
+ - ``name:howl`` --> The backup's name contains the string
+ "howl".
+ - ``source_table:prod`` --> The source_table's name
+ contains the string "prod".
+ - ``state:CREATING`` --> The backup is pending creation.
+ - ``state:READY`` --> The backup is fully created and ready
+ for use.
+ - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")``
+ --> The backup name contains the string "howl" and
+ start_time of the backup is before 2018-03-28T14:50:00Z.
+ - ``size_bytes > 10000000000`` --> The backup's size is
+ greater than 10GB.
+ order_by (str):
+ An expression for specifying the sort order of the results
+ of the request. The string value should specify one or more
+ fields in [Backup][google.bigtable.admin.v2.Backup]. The
+ full syntax is described at https://aip.dev/132#ordering.
+
+ Fields supported are:
+
+ - name
+ - source_table
+ - expire_time
+ - start_time
+ - end_time
+ - size_bytes
+ - state
+
+ For example, "start_time". The default sorting order is
+ ascending. To specify descending order for the field, a
+ suffix " desc" should be appended to the field name. For
+ example, "start_time desc". Redundant space characters in
+ the syntax are insignificant.
+
+ If order_by is empty, results will be sorted by
+ ``start_time`` in descending order starting from the most
+ recently created backup.
+ page_size (int):
+ Number of backups to be returned in the
+ response. If 0 or less, defaults to the server's
+ maximum allowed page size.
+ page_token (str):
+ If non-empty, ``page_token`` should contain a
+ [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token]
+ from a previous
+ [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse]
+ to the same ``parent`` and with the same ``filter``.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ filter: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ order_by: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ page_size: int = proto.Field(
+ proto.INT32,
+ number=4,
+ )
+ page_token: str = proto.Field(
+ proto.STRING,
+ number=5,
+ )
+
+
+class ListBackupsResponse(proto.Message):
+ r"""The response for
+ [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups].
+
+ Attributes:
+ backups (MutableSequence[google.cloud.bigtable_admin_v2.types.Backup]):
+ The list of matching backups.
+ next_page_token (str):
+ ``next_page_token`` can be sent in a subsequent
+ [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]
+ call to fetch more of the matching backups.
+ """
+
+ @property
+ def raw_page(self):
+ return self
+
+ backups: MutableSequence[gba_table.Backup] = proto.RepeatedField(
+ proto.MESSAGE,
+ number=1,
+ message=gba_table.Backup,
+ )
+ next_page_token: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+
+
+class CopyBackupRequest(proto.Message):
+ r"""The request for
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+
+ Attributes:
+ parent (str):
+ Required. The name of the destination cluster that will
+ contain the backup copy. The cluster must already exist.
+ Values are of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+ backup_id (str):
+ Required. The id of the new backup.
The ``backup_id`` along
+ with ``parent`` are combined as {parent}/backups/{backup_id}
+ to create the full backup name, of the form:
+ ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``.
+ This string must be between 1 and 50 characters in length
+ and match the regex [_a-zA-Z0-9][-_.a-zA-Z0-9]*.
+ source_backup (str):
+ Required. The source backup to be copied from. The source
+ backup needs to be in READY state for it to be copied.
+ Copying a copied backup is not allowed. Once CopyBackup is
+ in progress, the source backup cannot be deleted or cleaned
+ up on expiration until CopyBackup is finished. Values are of
+ the form:
+ ``projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>``.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Required. The expiration time of the copied backup
+ with microsecond granularity that must be at least 6 hours
+ and at most 30 days from the time the request is received.
+ Once the ``expire_time`` has passed, Cloud Bigtable will
+ delete the backup and free the resources used by the backup.
+ """
+
+ parent: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ backup_id: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ source_backup: str = proto.Field(
+ proto.STRING,
+ number=3,
+ )
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+
+
+class CopyBackupMetadata(proto.Message):
+ r"""Metadata type for the google.longrunning.Operation returned by
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+
+ Attributes:
+ name (str):
+ The name of the backup being created through the copy
+ operation. Values are of the form
+ ``projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>``.
+ source_backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo):
+ Information about the source backup that is
+ being copied from.
+ progress (google.cloud.bigtable_admin_v2.types.OperationProgress):
+ The progress of the
+ [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]
+ operation.
+ """
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ source_backup_info: gba_table.BackupInfo = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=gba_table.BackupInfo,
+ )
+ progress: common.OperationProgress = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=common.OperationProgress,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/common.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/common.py new file mode 100644 index 000000000..76e9cd894 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/common.py @@ -0,0 +1,81 @@ +# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.bigtable.admin.v2', + manifest={ + 'StorageType', + 'OperationProgress', + }, +) + + +class StorageType(proto.Enum): + r"""Storage media types for persisting Bigtable data. + + Values: + STORAGE_TYPE_UNSPECIFIED (0): + The user did not specify a storage type. + SSD (1): + Flash (SSD) storage should be used. + HDD (2): + Magnetic drive (HDD) storage should be used. + """ + STORAGE_TYPE_UNSPECIFIED = 0 + SSD = 1 + HDD = 2 + + +class OperationProgress(proto.Message): + r"""Encapsulates progress related information for a Cloud + Bigtable long running operation. + + Attributes: + progress_percent (int): + Percent completion of the operation. + Values are between 0 and 100 inclusive. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Time the request was received. + end_time (google.protobuf.timestamp_pb2.Timestamp): + If set, the time at which this operation + failed or was completed successfully. + """ + + progress_percent: int = proto.Field( + proto.INT32, + number=1, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=2, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/instance.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/instance.py new file mode 100644 index 000000000..0aeaef0d3 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/instance.py @@ -0,0 +1,620 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.cloud.bigtable_admin_v2.types import common +from google.protobuf import timestamp_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.bigtable.admin.v2', + manifest={ + 'Instance', + 'AutoscalingTargets', + 'AutoscalingLimits', + 'Cluster', + 'AppProfile', + 'HotTablet', + }, +) + + +class Instance(proto.Message): + r"""A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] + and the resources that serve them. All tables in an instance are + served from all [Clusters][google.bigtable.admin.v2.Cluster] in the + instance. + + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + The unique name of the instance. Values are of the form + ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. + display_name (str): + Required. The descriptive name for this + instance as it appears in UIs. 
Can be changed at
+ any time, but should be kept globally unique to
+ avoid confusion.
+ state (google.cloud.bigtable_admin_v2.types.Instance.State):
+ (``OutputOnly``) The current state of the instance.
+ type_ (google.cloud.bigtable_admin_v2.types.Instance.Type):
+ The type of the instance. Defaults to ``PRODUCTION``.
+ labels (MutableMapping[str, str]):
+ Labels are a flexible and lightweight mechanism for
+ organizing cloud resources into groups that reflect a
+ customer's organizational needs and deployment strategies.
+ They can be used to filter resources and aggregate metrics.
+
+ - Label keys must be between 1 and 63 characters long and
+ must conform to the regular expression:
+ ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``.
+ - Label values must be between 0 and 63 characters long and
+ must conform to the regular expression:
+ ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``.
+ - No more than 64 labels can be associated with a given
+ resource.
+ - Keys and values must both be under 128 bytes.
+ create_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. A server-assigned timestamp representing when
+ this Instance was created. For instances created before this
+ field was added (August 2021), this value is
+ ``seconds: 0, nanos: 1``.
+ satisfies_pzs (bool):
+ Output only. Reserved for future use.
+
+ This field is a member of `oneof`_ ``_satisfies_pzs``.
+ """
+ class State(proto.Enum):
+ r"""Possible states of an instance.
+
+ Values:
+ STATE_NOT_KNOWN (0):
+ The state of the instance could not be
+ determined.
+ READY (1):
+ The instance has been successfully created
+ and can serve requests to its tables.
+ CREATING (2):
+ The instance is currently being created, and
+ may be destroyed if the creation process
+ encounters an error.
+ """
+ STATE_NOT_KNOWN = 0
+ READY = 1
+ CREATING = 2
+
+ class Type(proto.Enum):
+ r"""The type of the instance.
+
+ Values:
+ TYPE_UNSPECIFIED (0):
+ The type of the instance is unspecified. If set when
+ creating an instance, a ``PRODUCTION`` instance will be
+ created. If set when updating an instance, the type will be
+ left unchanged.
+ PRODUCTION (1):
+ An instance meant for production use. ``serve_nodes`` must
+ be set on the cluster.
+ DEVELOPMENT (2):
+ DEPRECATED: Prefer PRODUCTION for all use
+ cases, as it no longer enforces a higher minimum
+ node count than DEVELOPMENT.
+ """
+ TYPE_UNSPECIFIED = 0
+ PRODUCTION = 1
+ DEVELOPMENT = 2
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ display_name: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ state: State = proto.Field(
+ proto.ENUM,
+ number=3,
+ enum=State,
+ )
+ type_: Type = proto.Field(
+ proto.ENUM,
+ number=4,
+ enum=Type,
+ )
+ labels: MutableMapping[str, str] = proto.MapField(
+ proto.STRING,
+ proto.STRING,
+ number=5,
+ )
+ create_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=7,
+ message=timestamp_pb2.Timestamp,
+ )
+ satisfies_pzs: bool = proto.Field(
+ proto.BOOL,
+ number=8,
+ optional=True,
+ )
+
+
+class AutoscalingTargets(proto.Message):
+ r"""The Autoscaling targets for a Cluster. These determine the
+ recommended nodes.
+
+ Attributes:
+ cpu_utilization_percent (int):
+ The CPU utilization that the Autoscaler should be trying to
+ achieve. This number is on a scale from 0 (no utilization)
+ to 100 (total utilization), and is limited between 10 and
+ 80, otherwise it will return an INVALID_ARGUMENT error.
+ storage_utilization_gib_per_node (int):
+ The storage utilization that the Autoscaler should be trying
+ to achieve.
This number is limited between 2560 (2.5TiB) and
+ 5120 (5TiB) for an SSD cluster and between 8192 (8TiB) and
+ 16384 (16TiB) for an HDD cluster, otherwise it will return an
+ INVALID_ARGUMENT error. If this value is set to 0, it will
+ be treated as if it were set to the default value: 2560 for
+ SSD, 8192 for HDD.
+ """
+
+ cpu_utilization_percent: int = proto.Field(
+ proto.INT32,
+ number=2,
+ )
+ storage_utilization_gib_per_node: int = proto.Field(
+ proto.INT32,
+ number=3,
+ )
+
+
+class AutoscalingLimits(proto.Message):
+ r"""Limits for the number of nodes a Cluster can autoscale
+ up/down to.
+
+ Attributes:
+ min_serve_nodes (int):
+ Required. Minimum number of nodes to scale
+ down to.
+ max_serve_nodes (int):
+ Required. Maximum number of nodes to scale up
+ to.
+ """
+
+ min_serve_nodes: int = proto.Field(
+ proto.INT32,
+ number=1,
+ )
+ max_serve_nodes: int = proto.Field(
+ proto.INT32,
+ number=2,
+ )
+
+
+class Cluster(proto.Message):
+ r"""A resizable group of nodes in a particular cloud location, capable
+ of serving all [Tables][google.bigtable.admin.v2.Table] in the
+ parent [Instance][google.bigtable.admin.v2.Instance].
+
+
+ .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields
+
+ Attributes:
+ name (str):
+ The unique name of the cluster. Values are of the form
+ ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``.
+ location (str):
+ Immutable. The location where this cluster's nodes and
+ storage reside. For best performance, clients should be
+ located as close as possible to this cluster. Currently only
+ zones are supported, so values should be of the form
+ ``projects/{project}/locations/{zone}``.
+ state (google.cloud.bigtable_admin_v2.types.Cluster.State):
+ Output only. The current state of the
+ cluster.
+ serve_nodes (int):
+ The number of nodes allocated to this
+ cluster. More nodes enable higher throughput and
+ more consistent performance.
+ cluster_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterConfig):
+ Configuration for this cluster.
+
+ This field is a member of `oneof`_ ``config``.
+ default_storage_type (google.cloud.bigtable_admin_v2.types.StorageType):
+ Immutable. The type of storage used by this
+ cluster to serve its parent instance's tables,
+ unless explicitly overridden.
+ encryption_config (google.cloud.bigtable_admin_v2.types.Cluster.EncryptionConfig):
+ Immutable. The encryption configuration for
+ CMEK-protected clusters.
+ """
+ class State(proto.Enum):
+ r"""Possible states of a cluster.
+
+ Values:
+ STATE_NOT_KNOWN (0):
+ The state of the cluster could not be
+ determined.
+ READY (1):
+ The cluster has been successfully created and
+ is ready to serve requests.
+ CREATING (2):
+ The cluster is currently being created, and
+ may be destroyed if the creation process
+ encounters an error. A cluster may not be able
+ to serve requests while being created.
+ RESIZING (3):
+ The cluster is currently being resized, and
+ may revert to its previous node count if the
+ process encounters an error. A cluster is still
+ capable of serving requests while being resized,
+ but may exhibit performance as if its number of
+ allocated nodes is between the starting and
+ requested states.
+ DISABLED (4):
+ The cluster has no backing nodes. The data
+ (tables) still exist, but no operations can be
+ performed on the cluster.
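+
+ Example (an illustrative sketch only; assumes an existing
+ ``BigtableInstanceAdminClient`` named ``client`` and a
+ hypothetical cluster name)::
+
+ cluster = client.get_cluster(
+ name="projects/p/instances/i/clusters/c",
+ )
+ if cluster.state == Cluster.State.RESIZING:
+ # Still serving requests, possibly with degraded
+ # performance, while the resize completes.
+ pass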
+ """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + RESIZING = 3 + DISABLED = 4 + + class ClusterAutoscalingConfig(proto.Message): + r"""Autoscaling config for a cluster. + + Attributes: + autoscaling_limits (google.cloud.bigtable_admin_v2.types.AutoscalingLimits): + Required. Autoscaling limits for this + cluster. + autoscaling_targets (google.cloud.bigtable_admin_v2.types.AutoscalingTargets): + Required. Autoscaling targets for this + cluster. + """ + + autoscaling_limits: 'AutoscalingLimits' = proto.Field( + proto.MESSAGE, + number=1, + message='AutoscalingLimits', + ) + autoscaling_targets: 'AutoscalingTargets' = proto.Field( + proto.MESSAGE, + number=2, + message='AutoscalingTargets', + ) + + class ClusterConfig(proto.Message): + r"""Configuration for a cluster. + + Attributes: + cluster_autoscaling_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterAutoscalingConfig): + Autoscaling configuration for this cluster. + """ + + cluster_autoscaling_config: 'Cluster.ClusterAutoscalingConfig' = proto.Field( + proto.MESSAGE, + number=1, + message='Cluster.ClusterAutoscalingConfig', + ) + + class EncryptionConfig(proto.Message): + r"""Cloud Key Management Service (Cloud KMS) settings for a + CMEK-protected cluster. + + Attributes: + kms_key_name (str): + Describes the Cloud KMS encryption key that will be used to + protect the destination Bigtable cluster. The requirements + for this key are: + + 1) The Cloud Bigtable service account associated with the + project that contains this cluster must be granted the + ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK + key. + 2) Only regional keys can be used and the region of the CMEK + key must match the region of the cluster. + 3) All clusters within an instance must use the same CMEK + key. Values are of the form + ``projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}`` + """ + + kms_key_name: str = proto.Field( + proto.STRING, + number=1, + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + location: str = proto.Field( + proto.STRING, + number=2, + ) + state: State = proto.Field( + proto.ENUM, + number=3, + enum=State, + ) + serve_nodes: int = proto.Field( + proto.INT32, + number=4, + ) + cluster_config: ClusterConfig = proto.Field( + proto.MESSAGE, + number=7, + oneof='config', + message=ClusterConfig, + ) + default_storage_type: common.StorageType = proto.Field( + proto.ENUM, + number=5, + enum=common.StorageType, + ) + encryption_config: EncryptionConfig = proto.Field( + proto.MESSAGE, + number=6, + message=EncryptionConfig, + ) + + +class AppProfile(proto.Message): + r"""A configuration object describing how Cloud Bigtable should + treat traffic from a particular end user application. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + name (str): + The unique name of the app profile. Values are of the form + ``projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + etag (str): + Strongly validated etag for optimistic concurrency control. + Preserve the value returned from ``GetAppProfile`` when + calling ``UpdateAppProfile`` to fail the request if there + has been a modification in the mean time. 
The
+ ``update_mask`` of the request need not include ``etag`` for
+ this protection to apply. See
+ `Wikipedia <https://en.wikipedia.org/wiki/HTTP_ETag>`__ and
+ `RFC
+ 7232 <https://tools.ietf.org/html/rfc7232#section-2.3>`__
+ for more details.
+ description (str):
+ Long form description of the use case for
+ this AppProfile.
+ multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny):
+ Use a multi-cluster routing policy.
+
+ This field is a member of `oneof`_ ``routing_policy``.
+ single_cluster_routing (google.cloud.bigtable_admin_v2.types.AppProfile.SingleClusterRouting):
+ Use a single-cluster routing policy.
+
+ This field is a member of `oneof`_ ``routing_policy``.
+ priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority):
+ This field has been deprecated in favor of
+ ``standard_isolation.priority``. If you set this field,
+ ``standard_isolation.priority`` will be set instead.
+
+ The priority of requests sent using this app profile.
+
+ This field is a member of `oneof`_ ``isolation``.
+ standard_isolation (google.cloud.bigtable_admin_v2.types.AppProfile.StandardIsolation):
+ The standard options used for isolating this
+ app profile's traffic from other use cases.
+
+ This field is a member of `oneof`_ ``isolation``.
+ """
+ class Priority(proto.Enum):
+ r"""Possible priorities for an app profile. Note that higher
+ priority writes can sometimes queue behind lower priority writes
+ to the same tablet, as writes must be strictly sequenced in the
+ durability log.
+
+ Values:
+ PRIORITY_UNSPECIFIED (0):
+ Default value. Mapped to PRIORITY_HIGH (the legacy behavior)
+ on creation.
+ PRIORITY_LOW (1):
+ No description available.
+ PRIORITY_MEDIUM (2):
+ No description available.
+ PRIORITY_HIGH (3):
+ No description available.
+ """
+ PRIORITY_UNSPECIFIED = 0
+ PRIORITY_LOW = 1
+ PRIORITY_MEDIUM = 2
+ PRIORITY_HIGH = 3
+
+ class MultiClusterRoutingUseAny(proto.Message):
+ r"""Read/write requests are routed to the nearest cluster in the
+ instance, and will fail over to the nearest cluster that is
+ available in the event of transient errors or delays. Clusters
+ in a region are considered equidistant. Choosing this option
+ sacrifices read-your-writes consistency to improve availability.
+
+ Attributes:
+ cluster_ids (MutableSequence[str]):
+ The set of clusters to route to. The order is
+ ignored; clusters will be tried in order of
+ distance. If left empty, all clusters are
+ eligible.
+ """
+
+ cluster_ids: MutableSequence[str] = proto.RepeatedField(
+ proto.STRING,
+ number=1,
+ )
+
+ class SingleClusterRouting(proto.Message):
+ r"""Unconditionally routes all read/write requests to a specific
+ cluster. This option preserves read-your-writes consistency but
+ does not improve availability.
+
+ Attributes:
+ cluster_id (str):
+ The cluster to which read/write requests
+ should be routed.
+ allow_transactional_writes (bool):
+ Whether or not ``CheckAndMutateRow`` and
+ ``ReadModifyWriteRow`` requests are allowed by this app
+ profile. It is unsafe to send these requests to the same
+ table/row/column in multiple clusters.
+ """
+
+ cluster_id: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ allow_transactional_writes: bool = proto.Field(
+ proto.BOOL,
+ number=2,
+ )
+
+ class StandardIsolation(proto.Message):
+ r"""Standard options for isolating this app profile's traffic
+ from other use cases.
+
+ Attributes:
+ priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority):
+ The priority of requests sent using this app
+ profile.
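+
+ Example (an illustrative sketch only; all values are
+ placeholders)::
+
+ profile = AppProfile(
+ description="low-priority batch traffic",
+ multi_cluster_routing_use_any=(
+ AppProfile.MultiClusterRoutingUseAny()),
+ standard_isolation=AppProfile.StandardIsolation(
+ priority=AppProfile.Priority.PRIORITY_LOW,
+ ),
+ )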
+ """ + + priority: 'AppProfile.Priority' = proto.Field( + proto.ENUM, + number=1, + enum='AppProfile.Priority', + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + etag: str = proto.Field( + proto.STRING, + number=2, + ) + description: str = proto.Field( + proto.STRING, + number=3, + ) + multi_cluster_routing_use_any: MultiClusterRoutingUseAny = proto.Field( + proto.MESSAGE, + number=5, + oneof='routing_policy', + message=MultiClusterRoutingUseAny, + ) + single_cluster_routing: SingleClusterRouting = proto.Field( + proto.MESSAGE, + number=6, + oneof='routing_policy', + message=SingleClusterRouting, + ) + priority: Priority = proto.Field( + proto.ENUM, + number=7, + oneof='isolation', + enum=Priority, + ) + standard_isolation: StandardIsolation = proto.Field( + proto.MESSAGE, + number=11, + oneof='isolation', + message=StandardIsolation, + ) + + +class HotTablet(proto.Message): + r"""A tablet is a defined by a start and end key and is explained + in https://cloud.google.com/bigtable/docs/overview#architecture + and + https://cloud.google.com/bigtable/docs/performance#optimization. + A Hot tablet is a tablet that exhibits high average cpu usage + during the time interval from start time to end time. + + Attributes: + name (str): + The unique name of the hot tablet. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/[a-zA-Z0-9_-]*``. + table_name (str): + Name of the table that contains the tablet. Values are of + the form + ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + start_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The start time of the hot + tablet. + end_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The end time of the hot tablet. + start_key (str): + Tablet Start Key (inclusive). + end_key (str): + Tablet End Key (inclusive). + node_cpu_usage_percent (float): + Output only. The average CPU usage spent by a node on this + tablet over the start_time to end_time time range. The + percentage is the amount of CPU used by the node to serve + the tablet, from 0% (tablet was not interacted with) to 100% + (the node spent all cycles serving the hot tablet). + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + table_name: str = proto.Field( + proto.STRING, + number=2, + ) + start_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=3, + message=timestamp_pb2.Timestamp, + ) + end_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + start_key: str = proto.Field( + proto.STRING, + number=5, + ) + end_key: str = proto.Field( + proto.STRING, + number=6, + ) + node_cpu_usage_percent: float = proto.Field( + proto.FLOAT, + number=7, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/table.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/table.py new file mode 100644 index 000000000..b618f1a91 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/table.py @@ -0,0 +1,727 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + +from google.protobuf import duration_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.rpc import status_pb2 # type: ignore + + +__protobuf__ = proto.module( + package='google.bigtable.admin.v2', + manifest={ + 'RestoreSourceType', + 'RestoreInfo', + 'ChangeStreamConfig', + 'Table', + 'ColumnFamily', + 'GcRule', + 'EncryptionInfo', + 'Snapshot', + 'Backup', + 'BackupInfo', + }, +) + + +class RestoreSourceType(proto.Enum): + r"""Indicates the type of the restore source. + + Values: + RESTORE_SOURCE_TYPE_UNSPECIFIED (0): + No restore associated. + BACKUP (1): + A backup was used as the source of the + restore. + """ + RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 + BACKUP = 1 + + +class RestoreInfo(proto.Message): + r"""Information about a table restore. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): + The type of the restore source. + backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): + Information about the backup used to restore + the table. The backup may no longer exist. + + This field is a member of `oneof`_ ``source_info``. + """ + + source_type: 'RestoreSourceType' = proto.Field( + proto.ENUM, + number=1, + enum='RestoreSourceType', + ) + backup_info: 'BackupInfo' = proto.Field( + proto.MESSAGE, + number=2, + oneof='source_info', + message='BackupInfo', + ) + + +class ChangeStreamConfig(proto.Message): + r"""Change stream configuration. + + Attributes: + retention_period (google.protobuf.duration_pb2.Duration): + How long the change stream should be + retained. Change stream data older than the + retention period will not be returned when + reading the change stream from the table. + Values must be at least 1 day and at most 7 + days, and will be truncated to microsecond + granularity. + """ + + retention_period: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + + +class Table(proto.Message): + r"""A collection of user data indexed by row, column, and + timestamp. Each table is served using the resources of its + parent cluster. + + Attributes: + name (str): + The unique name of the table. Values are of the form + ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. + Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, + ``FULL`` + cluster_states (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Table.ClusterState]): + Output only. Map from cluster ID to per-cluster table state. + If it could not be determined whether or not the table has + data in a particular cluster (for example, if its zone is + unavailable), then there will be an entry for the cluster + with UNKNOWN ``replication_status``. 
Views:
+ ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL``
+ column_families (MutableMapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]):
+ The column families configured for this table, mapped by
+ column family ID. Views: ``SCHEMA_VIEW``, ``STATS_VIEW``,
+ ``FULL``
+ granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity):
+ Immutable. The granularity (i.e. ``MILLIS``) at which
+ timestamps are stored in this table. Timestamps not matching
+ the granularity will be rejected. If unspecified at creation
+ time, the value will be set to ``MILLIS``. Views:
+ ``SCHEMA_VIEW``, ``FULL``.
+ restore_info (google.cloud.bigtable_admin_v2.types.RestoreInfo):
+ Output only. If this table was restored from
+ another data source (e.g. a backup), this field
+ will be populated with information about the
+ restore.
+ change_stream_config (google.cloud.bigtable_admin_v2.types.ChangeStreamConfig):
+ If specified, enable the change stream on
+ this table. Otherwise, the change stream is
+ disabled and the change stream is not retained.
+ deletion_protection (bool):
+ Set to true to make the table protected against data loss,
+ i.e. deleting the following resources through Admin APIs is
+ prohibited:
+
+ - The table.
+ - The column families in the table.
+ - The instance containing the table.
+
+ Note that one can still delete the data stored in the table
+ through Data APIs.
+ """
+ class TimestampGranularity(proto.Enum):
+ r"""Possible timestamp granularities to use when keeping multiple
+ versions of data in a table.
+
+ Values:
+ TIMESTAMP_GRANULARITY_UNSPECIFIED (0):
+ The user did not specify a granularity.
+ Should not be returned. When specified during
+ table creation, MILLIS will be used.
+ MILLIS (1):
+ The table keeps data versioned at a
+ granularity of 1ms.
+ """
+ TIMESTAMP_GRANULARITY_UNSPECIFIED = 0
+ MILLIS = 1
+
+ class View(proto.Enum):
+ r"""Defines a view over a table's fields.
+
+ Values:
+ VIEW_UNSPECIFIED (0):
+ Uses the default view for each method as
+ documented in its request.
+ NAME_ONLY (1):
+ Only populates ``name``.
+ SCHEMA_VIEW (2):
+ Only populates ``name`` and fields related to the table's
+ schema.
+ REPLICATION_VIEW (3):
+ Only populates ``name`` and fields related to the table's
+ replication state.
+ ENCRYPTION_VIEW (5):
+ Only populates ``name`` and fields related to the table's
+ encryption state.
+ FULL (4):
+ Populates all fields.
+ """
+ VIEW_UNSPECIFIED = 0
+ NAME_ONLY = 1
+ SCHEMA_VIEW = 2
+ REPLICATION_VIEW = 3
+ ENCRYPTION_VIEW = 5
+ FULL = 4
+
+ class ClusterState(proto.Message):
+ r"""The state of a table's data in a particular cluster.
+
+ Attributes:
+ replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState):
+ Output only. The state of replication for the
+ table in this cluster.
+ encryption_info (MutableSequence[google.cloud.bigtable_admin_v2.types.EncryptionInfo]):
+ Output only. The encryption information for
+ the table in this cluster. If the encryption key
+ protecting this resource is customer managed,
+ then its version can be rotated in Cloud Key
+ Management Service (Cloud KMS). The primary
+ version of the key and its status will be
+ reflected here when changes propagate from Cloud
+ KMS.
+ """
+ class ReplicationState(proto.Enum):
+ r"""Table replication states.
+
+ Values:
+ STATE_NOT_KNOWN (0):
+ The replication state of the table is unknown
+ in this cluster.
+ INITIALIZING (1): + The cluster was recently created, and the + table must finish copying over pre-existing data + from other clusters before it can begin + receiving live replication updates and serving + Data API requests. + PLANNED_MAINTENANCE (2): + The table is temporarily unable to serve Data + API requests from this cluster due to planned + internal maintenance. + UNPLANNED_MAINTENANCE (3): + The table is temporarily unable to serve Data + API requests from this cluster due to unplanned + or emergency maintenance. + READY (4): + The table can serve Data API requests from + this cluster. Depending on replication delay, + reads may not immediately reflect the state of + the table in other clusters. + READY_OPTIMIZING (5): + The table is fully created and ready for use after a + restore, and is being optimized for performance. When + optimizations are complete, the table will transition to + ``READY`` state. + """ + STATE_NOT_KNOWN = 0 + INITIALIZING = 1 + PLANNED_MAINTENANCE = 2 + UNPLANNED_MAINTENANCE = 3 + READY = 4 + READY_OPTIMIZING = 5 + + replication_state: 'Table.ClusterState.ReplicationState' = proto.Field( + proto.ENUM, + number=1, + enum='Table.ClusterState.ReplicationState', + ) + encryption_info: MutableSequence['EncryptionInfo'] = proto.RepeatedField( + proto.MESSAGE, + number=2, + message='EncryptionInfo', + ) + + name: str = proto.Field( + proto.STRING, + number=1, + ) + cluster_states: MutableMapping[str, ClusterState] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=2, + message=ClusterState, + ) + column_families: MutableMapping[str, 'ColumnFamily'] = proto.MapField( + proto.STRING, + proto.MESSAGE, + number=3, + message='ColumnFamily', + ) + granularity: TimestampGranularity = proto.Field( + proto.ENUM, + number=4, + enum=TimestampGranularity, + ) + restore_info: 'RestoreInfo' = proto.Field( + proto.MESSAGE, + number=6, + message='RestoreInfo', + ) + change_stream_config: 'ChangeStreamConfig' = proto.Field( + proto.MESSAGE, + number=8, + message='ChangeStreamConfig', + ) + deletion_protection: bool = proto.Field( + proto.BOOL, + number=9, + ) + + +class ColumnFamily(proto.Message): + r"""A set of columns within a table which share a common + configuration. + + Attributes: + gc_rule (google.cloud.bigtable_admin_v2.types.GcRule): + Garbage collection rule specified as a + protobuf. Must serialize to at most 500 bytes. + + NOTE: Garbage collection executes + opportunistically in the background, and so it's + possible for reads to return a cell even if it + matches the active GC expression for its family. + """ + + gc_rule: 'GcRule' = proto.Field( + proto.MESSAGE, + number=1, + message='GcRule', + ) + + +class GcRule(proto.Message): + r"""Rule for determining which cells to delete during garbage + collection. + + This message has `oneof`_ fields (mutually exclusive fields). + For each oneof, at most one member field can be set at the same time. + Setting any member of the oneof automatically clears all other + members. + + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + + Attributes: + max_num_versions (int): + Delete all cells in a column except the most + recent N. + + This field is a member of `oneof`_ ``rule``. + max_age (google.protobuf.duration_pb2.Duration): + Delete cells in a column older than the given + age. Values must be at least one millisecond, + and will be truncated to microsecond + granularity. + + This field is a member of `oneof`_ ``rule``. 
+ intersection (google.cloud.bigtable_admin_v2.types.GcRule.Intersection): + Delete cells that would be deleted by every + nested rule. + + This field is a member of `oneof`_ ``rule``. + union (google.cloud.bigtable_admin_v2.types.GcRule.Union): + Delete cells that would be deleted by any + nested rule. + + This field is a member of `oneof`_ ``rule``. + """ + + class Intersection(proto.Message): + r"""A GcRule which deletes cells matching all of the given rules. + + Attributes: + rules (MutableSequence[google.cloud.bigtable_admin_v2.types.GcRule]): + Only delete cells which would be deleted by every element of + ``rules``. + """ + + rules: MutableSequence['GcRule'] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='GcRule', + ) + + class Union(proto.Message): + r"""A GcRule which deletes cells matching any of the given rules. + + Attributes: + rules (MutableSequence[google.cloud.bigtable_admin_v2.types.GcRule]): + Delete cells which would be deleted by any element of + ``rules``. + """ + + rules: MutableSequence['GcRule'] = proto.RepeatedField( + proto.MESSAGE, + number=1, + message='GcRule', + ) + + max_num_versions: int = proto.Field( + proto.INT32, + number=1, + oneof='rule', + ) + max_age: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=2, + oneof='rule', + message=duration_pb2.Duration, + ) + intersection: Intersection = proto.Field( + proto.MESSAGE, + number=3, + oneof='rule', + message=Intersection, + ) + union: Union = proto.Field( + proto.MESSAGE, + number=4, + oneof='rule', + message=Union, + ) + + +class EncryptionInfo(proto.Message): + r"""Encryption information for a given resource. + If this resource is protected with customer managed encryption, + the in-use Cloud Key Management Service (Cloud KMS) key version + is specified along with its status. + + Attributes: + encryption_type (google.cloud.bigtable_admin_v2.types.EncryptionInfo.EncryptionType): + Output only. The type of encryption used to + protect this resource. + encryption_status (google.rpc.status_pb2.Status): + Output only. The status of encrypt/decrypt + calls on underlying data for this resource. + Regardless of status, the existing data is + always encrypted at rest. + kms_key_version (str): + Output only. The version of the Cloud KMS key + specified in the parent cluster that is in use + for the data underlying this table. + """ + class EncryptionType(proto.Enum): + r"""Possible encryption types for a resource. + + Values: + ENCRYPTION_TYPE_UNSPECIFIED (0): + Encryption type was not specified, though + data at rest remains encrypted. + GOOGLE_DEFAULT_ENCRYPTION (1): + The data backing this resource is encrypted + at rest with a key that is fully managed by + Google. No key version or status will be + populated. This is the default state. + CUSTOMER_MANAGED_ENCRYPTION (2): + The data backing this resource is encrypted at rest with a + key that is managed by the customer. The in-use version of + the key and its status are populated for CMEK-protected + tables. CMEK-protected backups are pinned to the key version + that was in use at the time the backup was taken. This key + version is populated but its status is not tracked and is + reported as ``UNKNOWN``. 
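+
+ Example (an illustrative sketch only; assumes a ``Table``
+ named ``table`` fetched with the ``ENCRYPTION_VIEW`` or
+ ``FULL`` view)::
+
+ cmek = EncryptionInfo.EncryptionType.CUSTOMER_MANAGED_ENCRYPTION
+ for cluster_id, state in table.cluster_states.items():
+ for info in state.encryption_info:
+ if info.encryption_type == cmek:
+ # The in-use key version is only populated for
+ # CMEK-protected resources.
+ print(cluster_id, info.kms_key_version)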
+ """ + ENCRYPTION_TYPE_UNSPECIFIED = 0 + GOOGLE_DEFAULT_ENCRYPTION = 1 + CUSTOMER_MANAGED_ENCRYPTION = 2 + + encryption_type: EncryptionType = proto.Field( + proto.ENUM, + number=3, + enum=EncryptionType, + ) + encryption_status: status_pb2.Status = proto.Field( + proto.MESSAGE, + number=4, + message=status_pb2.Status, + ) + kms_key_version: str = proto.Field( + proto.STRING, + number=2, + ) + + +class Snapshot(proto.Message): + r"""A snapshot of a table at a particular time. A snapshot can be + used as a checkpoint for data restoration or a data source for a + new table. + + Note: This is a private alpha release of Cloud Bigtable + snapshots. This feature is not currently available to most Cloud + Bigtable customers. This feature might be changed in + backward-incompatible ways and is not recommended for production + use. It is not subject to any SLA or deprecation policy. + + Attributes: + name (str): + The unique name of the snapshot. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. + source_table (google.cloud.bigtable_admin_v2.types.Table): + Output only. The source table at the time the + snapshot was taken. + data_size_bytes (int): + Output only. The size of the data in the + source table at the time the snapshot was taken. + In some cases, this value may be computed + asynchronously via a background process and a + placeholder of 0 will be used in the meantime. + create_time (google.protobuf.timestamp_pb2.Timestamp): + Output only. The time when the snapshot is + created. + delete_time (google.protobuf.timestamp_pb2.Timestamp): + The time when the snapshot will be deleted. + The maximum amount of time a snapshot can stay + active is 365 days. If 'ttl' is not specified, + the default maximum of 365 days will be used. + state (google.cloud.bigtable_admin_v2.types.Snapshot.State): + Output only. The current state of the + snapshot. + description (str): + Description of the snapshot. + """ + class State(proto.Enum): + r"""Possible states of a snapshot. + + Values: + STATE_NOT_KNOWN (0): + The state of the snapshot could not be + determined. + READY (1): + The snapshot has been successfully created + and can serve all requests. + CREATING (2): + The snapshot is currently being created, and + may be destroyed if the creation process + encounters an error. A snapshot may not be + restored to a table while it is being created. + """ + STATE_NOT_KNOWN = 0 + READY = 1 + CREATING = 2 + + name: str = proto.Field( + proto.STRING, + number=1, + ) + source_table: 'Table' = proto.Field( + proto.MESSAGE, + number=2, + message='Table', + ) + data_size_bytes: int = proto.Field( + proto.INT64, + number=3, + ) + create_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + delete_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=5, + message=timestamp_pb2.Timestamp, + ) + state: State = proto.Field( + proto.ENUM, + number=6, + enum=State, + ) + description: str = proto.Field( + proto.STRING, + number=7, + ) + + +class Backup(proto.Message): + r"""A backup of a Cloud Bigtable table. + + Attributes: + name (str): + A globally unique identifier for the backup which cannot be + changed. Values are of the form + ``projects/{project}/instances/{instance}/clusters/{cluster}/ backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` + The final segment of the name must be between 1 and 50 + characters in length. 
+
+ The backup is stored in the cluster identified by the prefix
+ of the backup name of the form
+ ``projects/{project}/instances/{instance}/clusters/{cluster}``.
+ source_table (str):
+ Required. Immutable. Name of the table from which this
+ backup was created. This needs to be in the same instance as
+ the backup. Values are of the form
+ ``projects/{project}/instances/{instance}/tables/{source_table}``.
+ source_backup (str):
+ Output only. Name of the backup from which
+ this backup was copied. If a backup is not
+ created by copying a backup, this field will be
+ empty. Values are of the form:
+ projects/<project>/instances/<instance>/backups/<backup>.
+ expire_time (google.protobuf.timestamp_pb2.Timestamp):
+ Required. The expiration time of the backup, with
+ microsecond granularity that must be at least 6 hours and
+ at most 90 days from the time the request is received. Once
+ the ``expire_time`` has passed, Cloud Bigtable will delete
+ the backup and free the resources used by the backup.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. ``start_time`` is the time that the backup was
+ started (i.e. approximately the time the
+ [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]
+ request is received). The row data in this backup will be no
+ older than this timestamp.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. ``end_time`` is the time that the backup was
+ finished. The row data in the backup will be no newer than
+ this timestamp.
+ size_bytes (int):
+ Output only. Size of the backup in bytes.
+ state (google.cloud.bigtable_admin_v2.types.Backup.State):
+ Output only. The current state of the backup.
+ encryption_info (google.cloud.bigtable_admin_v2.types.EncryptionInfo):
+ Output only. The encryption information for
+ the backup.
+ """
+ class State(proto.Enum):
+ r"""Indicates the current state of the backup.
+
+ Values:
+ STATE_UNSPECIFIED (0):
+ Not specified.
+ CREATING (1):
+ The pending backup is still being created. Operations on the
+ backup may fail with ``FAILED_PRECONDITION`` in this state.
+ READY (2):
+ The backup is complete and ready for use.
+ """
+ STATE_UNSPECIFIED = 0
+ CREATING = 1
+ READY = 2
+
+ name: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ source_table: str = proto.Field(
+ proto.STRING,
+ number=2,
+ )
+ source_backup: str = proto.Field(
+ proto.STRING,
+ number=10,
+ )
+ expire_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+ start_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=4,
+ message=timestamp_pb2.Timestamp,
+ )
+ end_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=5,
+ message=timestamp_pb2.Timestamp,
+ )
+ size_bytes: int = proto.Field(
+ proto.INT64,
+ number=6,
+ )
+ state: State = proto.Field(
+ proto.ENUM,
+ number=7,
+ enum=State,
+ )
+ encryption_info: 'EncryptionInfo' = proto.Field(
+ proto.MESSAGE,
+ number=9,
+ message='EncryptionInfo',
+ )
+
+
+class BackupInfo(proto.Message):
+ r"""Information about a backup.
+
+ Attributes:
+ backup (str):
+ Output only. Name of the backup.
+ start_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The time that the backup was
+ started. Row data in the backup will be no older
+ than this timestamp.
+ end_time (google.protobuf.timestamp_pb2.Timestamp):
+ Output only. The time that the backup was
+ finished. Row data in the backup will be no
+ newer than this timestamp.
+ source_table (str):
+ Output only.
Name of the table the backup was
+ created from.
+ source_backup (str):
+ Output only. Name of the backup from which
+ this backup was copied. If a backup is not
+ created by copying a backup, this field will be
+ empty. Values are of the form:
+ projects/<project>/instances/<instance>/backups/<backup>.
+ """
+
+ backup: str = proto.Field(
+ proto.STRING,
+ number=1,
+ )
+ start_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=2,
+ message=timestamp_pb2.Timestamp,
+ )
+ end_time: timestamp_pb2.Timestamp = proto.Field(
+ proto.MESSAGE,
+ number=3,
+ message=timestamp_pb2.Timestamp,
+ )
+ source_table: str = proto.Field(
+ proto.STRING,
+ number=4,
+ )
+ source_backup: str = proto.Field(
+ proto.STRING,
+ number=10,
+ )
+
+
+__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/mypy.ini b/owl-bot-staging/bigtable_admin/v2/mypy.ini new file mode 100644 index 000000000..574c5aed3 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/mypy.ini @@ -0,0 +1,3 @@ +[mypy]
+python_version = 3.7
+namespace_packages = True
diff --git a/owl-bot-staging/bigtable_admin/v2/noxfile.py b/owl-bot-staging/bigtable_admin/v2/noxfile.py new file mode 100644 index 000000000..9207549e2 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/noxfile.py @@ -0,0 +1,184 @@ +# -*- coding: utf-8 -*-
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+import os
+import pathlib
+import shutil
+import subprocess
+import sys
+
+
+import nox # type: ignore
+
+ALL_PYTHON = [
+ "3.7",
+ "3.8",
+ "3.9",
+ "3.10",
+ "3.11",
+]
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+
+LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt"
+PACKAGE_NAME = subprocess.check_output([sys.executable, "setup.py", "--name"], encoding="utf-8")
+
+BLACK_VERSION = "black==22.3.0"
+BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"]
+DEFAULT_PYTHON_VERSION = "3.11"
+
+nox.sessions = [
+ "unit",
+ "cover",
+ "mypy",
+ "check_lower_bounds",
+ # exclude update_lower_bounds from default
+ "docs",
+ "blacken",
+ "lint",
+ "lint_setup_py",
+]
+
+@nox.session(python=ALL_PYTHON)
+def unit(session):
+ """Run the unit test suite."""
+
+ session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"')
+ session.install('-e', '.')
+
+ session.run(
+ 'py.test',
+ '--quiet',
+ '--cov=google/cloud/bigtable_admin_v2/',
+ '--cov=tests/',
+ '--cov-config=.coveragerc',
+ '--cov-report=term',
+ '--cov-report=html',
+ os.path.join('tests', 'unit', ''.join(session.posargs))
+ )
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def cover(session):
+ """Run the final coverage report.
+ This outputs the coverage report aggregating coverage from the unit
+ test runs (not system test runs), and then erases coverage data.
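+
+ Typical invocation (assuming ``nox`` is installed): run
+ ``nox -s cover`` after the ``unit`` sessions so there is
+ coverage data to aggregate.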
+ """ + session.install("coverage", "pytest-cov") + session.run("coverage", "report", "--show-missing", "--fail-under=100") + + session.run("coverage", "erase") + + +@nox.session(python=ALL_PYTHON) +def mypy(session): + """Run the type checker.""" + session.install( + 'mypy', + 'types-requests', + 'types-protobuf' + ) + session.install('.') + session.run( + 'mypy', + '--explicit-package-bases', + 'google', + ) + + +@nox.session +def update_lower_bounds(session): + """Update lower bounds in constraints.txt to match setup.py""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'update', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + + +@nox.session +def check_lower_bounds(session): + """Check lower bounds in setup.py are reflected in constraints file""" + session.install('google-cloud-testutils') + session.install('.') + + session.run( + 'lower-bound-checker', + 'check', + '--package-name', + PACKAGE_NAME, + '--constraints-file', + str(LOWER_BOUND_CONSTRAINTS_FILE), + ) + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def docs(session): + """Build the docs for this library.""" + + session.install("-e", ".") + session.install("sphinx==7.0.1", "alabaster", "recommonmark") + + shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) + session.run( + "sphinx-build", + "-W", # warnings as errors + "-T", # show full traceback on exception + "-N", # no colors + "-b", + "html", + "-d", + os.path.join("docs", "_build", "doctrees", ""), + os.path.join("docs", ""), + os.path.join("docs", "_build", "html", ""), + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint(session): + """Run linters. + + Returns a failure if the linters find linting errors or sufficiently + serious code quality issues. + """ + session.install("flake8", BLACK_VERSION) + session.run( + "black", + "--check", + *BLACK_PATHS, + ) + session.run("flake8", "google", "tests", "samples") + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def blacken(session): + """Run black. Format code to uniform standard.""" + session.install(BLACK_VERSION) + session.run( + "black", + *BLACK_PATHS, + ) + + +@nox.session(python=DEFAULT_PYTHON_VERSION) +def lint_setup_py(session): + """Verify that setup.py is valid (including RST check).""" + session.install("docutils", "pygments") + session.run("python", "setup.py", "check", "--restructuredtext", "--strict") diff --git a/owl-bot-staging/bigtable_admin/v2/scripts/fixup_bigtable_admin_v2_keywords.py b/owl-bot-staging/bigtable_admin/v2/scripts/fixup_bigtable_admin_v2_keywords.py new file mode 100644 index 000000000..6882feaf6 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/scripts/fixup_bigtable_admin_v2_keywords.py @@ -0,0 +1,218 @@ +#! /usr/bin/env python3 +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +import argparse +import os +import libcst as cst +import pathlib +import sys +from typing import (Any, Callable, Dict, List, Sequence, Tuple) + + +def partition( + predicate: Callable[[Any], bool], + iterator: Sequence[Any] +) -> Tuple[List[Any], List[Any]]: + """A stable, out-of-place partition.""" + results = ([], []) + + for i in iterator: + results[int(predicate(i))].append(i) + + # Returns trueList, falseList + return results[1], results[0] + + +class bigtable_adminCallTransformer(cst.CSTTransformer): + CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') + METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { + 'check_consistency': ('name', 'consistency_token', ), + 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ), + 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), + 'create_backup': ('parent', 'backup_id', 'backup', ), + 'create_cluster': ('parent', 'cluster_id', 'cluster', ), + 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), + 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), + 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), + 'delete_app_profile': ('name', 'ignore_warnings', ), + 'delete_backup': ('name', ), + 'delete_cluster': ('name', ), + 'delete_instance': ('name', ), + 'delete_snapshot': ('name', ), + 'delete_table': ('name', ), + 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), + 'generate_consistency_token': ('name', ), + 'get_app_profile': ('name', ), + 'get_backup': ('name', ), + 'get_cluster': ('name', ), + 'get_iam_policy': ('resource', 'options', ), + 'get_instance': ('name', ), + 'get_snapshot': ('name', ), + 'get_table': ('name', 'view', ), + 'list_app_profiles': ('parent', 'page_size', 'page_token', ), + 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), + 'list_clusters': ('parent', 'page_token', ), + 'list_hot_tablets': ('parent', 'start_time', 'end_time', 'page_size', 'page_token', ), + 'list_instances': ('parent', 'page_token', ), + 'list_snapshots': ('parent', 'page_size', 'page_token', ), + 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), + 'modify_column_families': ('name', 'modifications', ), + 'partial_update_cluster': ('cluster', 'update_mask', ), + 'partial_update_instance': ('instance', 'update_mask', ), + 'restore_table': ('parent', 'table_id', 'backup', ), + 'set_iam_policy': ('resource', 'policy', 'update_mask', ), + 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), + 'test_iam_permissions': ('resource', 'permissions', ), + 'undelete_table': ('name', ), + 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), + 'update_backup': ('backup', 'update_mask', ), + 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ), + 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', ), + 'update_table': ('table', 'update_mask', ), + } + + def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: + try: + key = original.func.attr.value + kword_params = self.METHOD_TO_PARAMS[key] + except (AttributeError, KeyError): + # Either not a method from the API or too convoluted to be sure. + return updated + + # If the existing code is valid, keyword args come after positional args. + # Therefore, all positional args must map to the first parameters. 
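+ # For example (hypothetical): in update_backup(my_backup, update_mask=mask), + # the positional my_backup must correspond to 'backup', the first entry of + # METHOD_TO_PARAMS['update_backup'].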
+ args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) + if any(k.keyword.value == "request" for k in kwargs): + # We've already fixed this file, don't fix it again. + return updated + + kwargs, ctrl_kwargs = partition( + lambda a: a.keyword.value not in self.CTRL_PARAMS, + kwargs + ) + + args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] + ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) + for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) + + request_arg = cst.Arg( + value=cst.Dict([ + cst.DictElement( + cst.SimpleString("'{}'".format(name)), + cst.Element(value=arg.value) + ) + # Note: the args + kwargs looks silly, but keep in mind that + # the control parameters had to be stripped out, and that + # those could have been passed positionally or by keyword. + for name, arg in zip(kword_params, args + kwargs)]), + keyword=cst.Name("request") + ) + + return updated.with_changes( + args=[request_arg] + ctrl_kwargs + ) + + +def fix_files( + in_dir: pathlib.Path, + out_dir: pathlib.Path, + *, + transformer=bigtable_adminCallTransformer(), +): + """Duplicate the input dir to the output dir, fixing file method calls. + + Preconditions: + * in_dir is a real directory + * out_dir is a real, empty directory + """ + pyfile_gen = ( + pathlib.Path(os.path.join(root, f)) + for root, _, files in os.walk(in_dir) + for f in files if os.path.splitext(f)[1] == ".py" + ) + + for fpath in pyfile_gen: + with open(fpath, 'r') as f: + src = f.read() + + # Parse the code and insert method call fixes. + tree = cst.parse_module(src) + updated = tree.visit(transformer) + + # Create the path and directory structure for the new file. + updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) + updated_path.parent.mkdir(parents=True, exist_ok=True) + + # Generate the updated source file at the corresponding path. + with open(updated_path, 'w') as f: + f.write(updated.code) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser( + description="""Fix up source that uses the bigtable_admin client library. + +The existing sources are NOT overwritten but are copied to output_dir with changes made. + +Note: This tool operates at a best-effort level at converting positional + parameters in client method calls to keyword based parameters. + Cases where it WILL FAIL include + A) * or ** expansion in a method call. + B) Calls via function or method alias (includes free function calls) + C) Indirect or dispatched calls (e.g. the method is looked up dynamically) + + These all constitute false negatives. The tool will also detect false + positives when an API method shares a name with another method.
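+ +Example invocation (hypothetical paths): + python3 fixup_bigtable_admin_v2_keywords.py --input-directory .samples/ --output-directory samples/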
+""") + parser.add_argument( + '-d', + '--input-directory', + required=True, + dest='input_dir', + help='the input directory to walk for python files to fix up', + ) + parser.add_argument( + '-o', + '--output-directory', + required=True, + dest='output_dir', + help='the directory to output files fixed via un-flattening', + ) + args = parser.parse_args() + input_dir = pathlib.Path(args.input_dir) + output_dir = pathlib.Path(args.output_dir) + if not input_dir.is_dir(): + print( + f"input directory '{input_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if not output_dir.is_dir(): + print( + f"output directory '{output_dir}' does not exist or is not a directory", + file=sys.stderr, + ) + sys.exit(-1) + + if os.listdir(output_dir): + print( + f"output directory '{output_dir}' is not empty", + file=sys.stderr, + ) + sys.exit(-1) + + fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/bigtable_admin/v2/setup.py b/owl-bot-staging/bigtable_admin/v2/setup.py new file mode 100644 index 000000000..00c2149fd --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/setup.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import io +import os + +import setuptools # type: ignore + +package_root = os.path.abspath(os.path.dirname(__file__)) + +name = 'google-cloud-bigtable-admin' + + +description = "Google Cloud Bigtable Admin API client library" + +version = {} +with open(os.path.join(package_root, 'google/cloud/bigtable_admin/gapic_version.py')) as fp: + exec(fp.read(), version) +version = version["__version__"] + +if version[0] == "0": + release_status = "Development Status :: 4 - Beta" +else: + release_status = "Development Status :: 5 - Production/Stable" + +dependencies = [ + "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", + "proto-plus >= 1.22.0, <2.0.0dev", + "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", + "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", + "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", +] +url = "https://github.com/googleapis/python-bigtable-admin" + +package_root = os.path.abspath(os.path.dirname(__file__)) + +readme_filename = os.path.join(package_root, "README.rst") +with io.open(readme_filename, encoding="utf-8") as readme_file: + readme = readme_file.read() + +packages = [ + package + for package in setuptools.PEP420PackageFinder.find() + if package.startswith("google") +] + +namespaces = ["google", "google.cloud"] + +setuptools.setup( + name=name, + version=version, + description=description, + long_description=readme, + author="Google LLC", + author_email="googleapis-packages@google.com", + license="Apache 2.0", + url=url, + classifiers=[ + release_status, + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python", + "Programming Language :: 
Python :: 3", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Operating System :: OS Independent", + "Topic :: Internet", + ], + platforms="Posix; MacOS X; Windows", + packages=packages, + python_requires=">=3.7", + namespace_packages=namespaces, + install_requires=dependencies, + include_package_data=True, + zip_safe=False, +) diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.10.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.10.txt new file mode 100644 index 000000000..ad3f0fa58 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.10.txt @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf +grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.11.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.11.txt new file mode 100644 index 000000000..ad3f0fa58 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.11.txt @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf +grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.12.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.12.txt new file mode 100644 index 000000000..ad3f0fa58 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.12.txt @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf +grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.7.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.7.txt new file mode 100644 index 000000000..2beecf99e --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.7.txt @@ -0,0 +1,10 @@ +# This constraints file is used to check that lower bounds +# are correct in setup.py +# List all library dependencies and extras in this file. +# Pin the version to the lower bound. +# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", +# Then this file should have google-cloud-foo==1.14.0 +google-api-core==1.34.0 +proto-plus==1.22.0 +protobuf==3.19.5 +grpc-google-iam-v1==0.12.4 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.8.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.8.txt new file mode 100644 index 000000000..ad3f0fa58 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.8.txt @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. +google-api-core +proto-plus +protobuf +grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.9.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.9.txt new file mode 100644 index 000000000..ad3f0fa58 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.9.txt @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- +# This constraints file is required for unit tests. +# List all library dependencies and extras in this file. 
+google-api-core +proto-plus +protobuf +grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/tests/__init__.py b/owl-bot-staging/bigtable_admin/v2/tests/__init__.py new file mode 100644 index 000000000..1b4db446e --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/tests/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/__init__.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/__init__.py new file mode 100644 index 000000000..1b4db446e --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/tests/unit/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/__init__.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/__init__.py new file mode 100644 index 000000000..1b4db446e --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/__init__.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/__init__.py new file mode 100644 index 000000000..1b4db446e --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -0,0 +1,16 @@ + +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py new file mode 100644 index 000000000..53c3904e7 --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -0,0 +1,12025 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +import os +# try/except added for compatibility with python < 3.8 +try: + from unittest import mock + from unittest.mock import AsyncMock # pragma: NO COVER +except ImportError: # pragma: NO COVER + import mock + +import grpc +from grpc.experimental import aio +from collections.abc import Iterable +from google.protobuf import json_format +import json +import math +import pytest +from proto.marshal.rules.dates import DurationRule, TimestampRule +from proto.marshal.rules import wrappers +from requests import Response +from requests import Request, PreparedRequest +from requests.sessions import Session +from google.protobuf import json_format + +from google.api_core import client_options +from google.api_core import exceptions as core_exceptions +from google.api_core import future +from google.api_core import gapic_v1 +from google.api_core import grpc_helpers +from google.api_core import grpc_helpers_async +from google.api_core import operation +from google.api_core import operation_async # type: ignore +from google.api_core import operations_v1 +from google.api_core import path_template +from google.auth import credentials as ga_credentials +from google.auth.exceptions import MutualTLSChannelError +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import BigtableInstanceAdminClient +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers +from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports +from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin +from google.cloud.bigtable_admin_v2.types import common +from google.cloud.bigtable_admin_v2.types import instance +from google.cloud.bigtable_admin_v2.types import instance as gba_instance +from google.iam.v1 import iam_policy_pb2 # type: ignore +from google.iam.v1 import options_pb2 # type: ignore +from google.iam.v1 import policy_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore +from google.oauth2 
import service_account +from google.protobuf import field_mask_pb2 # type: ignore +from google.protobuf import timestamp_pb2 # type: ignore +from google.type import expr_pb2 # type: ignore +import google.auth + + +def client_cert_source_callback(): + return b"cert bytes", b"key bytes" + + +# If default endpoint is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BigtableInstanceAdminClient._get_default_mtls_endpoint(None) is None + assert BigtableInstanceAdminClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert BigtableInstanceAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert BigtableInstanceAdminClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (BigtableInstanceAdminClient, "grpc"), + (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), + (BigtableInstanceAdminClient, "rest"), +]) +def test_bigtable_instance_admin_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'bigtableadmin.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://bigtableadmin.googleapis.com' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.BigtableInstanceAdminGrpcTransport, "grpc"), + (transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.BigtableInstanceAdminRestTransport, "rest"), +]) +def test_bigtable_instance_admin_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (BigtableInstanceAdminClient, "grpc"), + (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), + (BigtableInstanceAdminClient, "rest"), +]) +def 
test_bigtable_instance_admin_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'bigtableadmin.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://bigtableadmin.googleapis.com' + ) + + +def test_bigtable_instance_admin_client_get_transport_class(): + transport = BigtableInstanceAdminClient.get_transport_class() + available_transports = [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminRestTransport, + ] + assert transport in available_transports + + transport = BigtableInstanceAdminClient.get_transport_class("grpc") + assert transport == transports.BigtableInstanceAdminGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc"), + (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest"), +]) +@mock.patch.object(BigtableInstanceAdminClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableInstanceAdminClient)) +@mock.patch.object(BigtableInstanceAdminAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableInstanceAdminAsyncClient)) +def test_bigtable_instance_admin_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(BigtableInstanceAdminClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BigtableInstanceAdminClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". 
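+ # (Values "never" and "always" are exercised below; an unsupported value is expected to raise MutualTLSChannelError, and "auto" is covered separately by the mtls_env_auto test.)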
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions(api_audience="https://language.googleapis.com") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com" + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc", "true"), + (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc", "false"), + (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", "false"), + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest", "true"), + (BigtableInstanceAdminClient, 
transports.BigtableInstanceAdminRestTransport, "rest", "false"), +]) +@mock.patch.object(BigtableInstanceAdminClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableInstanceAdminClient)) +@mock.patch.object(BigtableInstanceAdminAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableInstanceAdminAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_bigtable_instance_admin_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ + BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient +]) +@mock.patch.object(BigtableInstanceAdminClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableInstanceAdminClient)) +@mock.patch.object(BigtableInstanceAdminAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableInstanceAdminAsyncClient)) +def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
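+ # (Here the expectation flips: DEFAULT_MTLS_ENDPOINT and the default client cert source should both be selected.)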
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc"), + (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest"), +]) +def test_bigtable_instance_admin_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc", grpc_helpers), + (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest", None), +]) +def test_bigtable_instance_admin_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
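+ # (The path is expected to be forwarded to the transport as credentials_file, with no credentials object constructed up front.)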
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +def test_bigtable_instance_admin_client_client_options_from_dict(): + with mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = BigtableInstanceAdminClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc", grpc_helpers), + (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_bigtable_instance_admin_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
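+ # (google.auth.load_credentials_from_file is mocked below, so file_creds rather than the ADC credentials must reach create_channel.)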
+ with mock.patch.object( + google.auth, "load_credentials_from_file", autospec=True + ) as load_creds, mock.patch.object( + google.auth, "default", autospec=True + ) as adc, mock.patch.object( + grpc_helpers, "create_channel" + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + file_creds = ga_credentials.AnonymousCredentials() + load_creds.return_value = (file_creds, None) + adc.return_value = (creds, None) + client = client_class(client_options=options, transport=transport_name) + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=file_creds, + credentials_file=None, + quota_project_id=None, + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/bigtable.admin.instance', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + scopes=None, + default_host="bigtableadmin.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.CreateInstanceRequest, + dict, +]) +def test_create_instance(request_type, transport: str = 'grpc'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_instance), + '__call__') as call: + client.create_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + +@pytest.mark.asyncio +async def test_create_instance_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.CreateInstanceRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. 
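+ # (grpc_helpers_async.FakeUnaryUnaryCall wraps the canned Operation so the mocked stub can be awaited like a real async RPC.)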
+ with mock.patch.object( + type(client.transport.create_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_instance_async_from_dict(): + await test_create_instance_async(request_type=dict) + + +def test_create_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateInstanceRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_instance), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateInstanceRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_instance), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
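+ # (The client is expected to fold these flattened keyword arguments into a single CreateInstanceRequest, which the assertions below inspect field by field.)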
+ client.create_instance( + parent='parent_value', + instance_id='instance_id_value', + instance=gba_instance.Instance(name='name_value'), + clusters={'key_value': gba_instance.Cluster(name='name_value')}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].instance_id + mock_val = 'instance_id_value' + assert arg == mock_val + arg = args[0].instance + mock_val = gba_instance.Instance(name='name_value') + assert arg == mock_val + arg = args[0].clusters + mock_val = {'key_value': gba_instance.Cluster(name='name_value')} + assert arg == mock_val + + +def test_create_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent='parent_value', + instance_id='instance_id_value', + instance=gba_instance.Instance(name='name_value'), + clusters={'key_value': gba_instance.Cluster(name='name_value')}, + ) + +@pytest.mark.asyncio +async def test_create_instance_flattened_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.create_instance( + parent='parent_value', + instance_id='instance_id_value', + instance=gba_instance.Instance(name='name_value'), + clusters={'key_value': gba_instance.Cluster(name='name_value')}, + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].instance_id + mock_val = 'instance_id_value' + assert arg == mock_val + arg = args[0].instance + mock_val = gba_instance.Instance(name='name_value') + assert arg == mock_val + arg = args[0].clusters + mock_val = {'key_value': gba_instance.Cluster(name='name_value')} + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error.
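+ # (Mixing the request-object and flattened styles is ambiguous, so a ValueError is expected before any RPC is attempted.)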
+ with pytest.raises(ValueError): + await client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent='parent_value', + instance_id='instance_id_value', + instance=gba_instance.Instance(name='name_value'), + clusters={'key_value': gba_instance.Cluster(name='name_value')}, + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.GetInstanceRequest, + dict, +]) +def test_get_instance(request_type, transport: str = 'grpc'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance( + name='name_value', + display_name='display_name_value', + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + response = client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Instance) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + + +def test_get_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance), + '__call__') as call: + client.get_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetInstanceRequest() + +@pytest.mark.asyncio +async def test_get_instance_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.GetInstanceRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance( + name='name_value', + display_name='display_name_value', + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + )) + response = await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Instance) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + + +@pytest.mark.asyncio +async def test_get_instance_async_from_dict(): + await test_get_instance_async(request_type=dict) + + +def test_get_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetInstanceRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance), + '__call__') as call: + call.return_value = instance.Instance() + client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.GetInstanceRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) + await client.get_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_instance_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = instance.Instance() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_instance( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_instance_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.get_instance(
+            bigtable_instance_admin.GetInstanceRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_instance_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_instance),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_instance(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_instance_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_instance(
+            bigtable_instance_admin.GetInstanceRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_instance_admin.ListInstancesRequest,
+  dict,
+])
+def test_list_instances(request_type, transport: str = 'grpc'):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_instances),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_instance_admin.ListInstancesResponse(
+            failed_locations=['failed_locations_value'],
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_instances(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.ListInstancesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response.raw_page is response
+    assert isinstance(response, bigtable_instance_admin.ListInstancesResponse)
+    assert response.failed_locations == ['failed_locations_value']
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_instances_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_instances),
+            '__call__') as call:
+        client.list_instances()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.ListInstancesRequest()
+
+@pytest.mark.asyncio
+async def test_list_instances_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.ListInstancesRequest):
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_instances),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListInstancesResponse(
+            failed_locations=['failed_locations_value'],
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_instances(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.ListInstancesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable_instance_admin.ListInstancesResponse)
+    assert response.failed_locations == ['failed_locations_value']
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_instances_async_from_dict():
+    await test_list_instances_async(request_type=dict)
+
+
+def test_list_instances_field_headers():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.ListInstancesRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_instances),
+            '__call__') as call:
+        call.return_value = bigtable_instance_admin.ListInstancesResponse()
+        client.list_instances(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_instances_field_headers_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.ListInstancesRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_instances),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListInstancesResponse())
+        await client.list_instances(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_list_instances_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_instances),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_instance_admin.ListInstancesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_instances(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+
+def test_list_instances_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_instances(
+            bigtable_instance_admin.ListInstancesRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_instances_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_instances),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListInstancesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_instances(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_instances_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_instances(
+            bigtable_instance_admin.ListInstancesRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  instance.Instance,
+  dict,
+])
+def test_update_instance(request_type, transport: str = 'grpc'):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_instance),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = instance.Instance(
+            name='name_value',
+            display_name='display_name_value',
+            state=instance.Instance.State.READY,
+            type_=instance.Instance.Type.PRODUCTION,
+            satisfies_pzs=True,
+        )
+        response = client.update_instance(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == instance.Instance()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, instance.Instance)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == instance.Instance.State.READY
+    assert response.type_ == instance.Instance.Type.PRODUCTION
+    assert response.satisfies_pzs is True
+
+
+def test_update_instance_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_instance),
+            '__call__') as call:
+        client.update_instance()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == instance.Instance()
+
+@pytest.mark.asyncio
+async def test_update_instance_async(transport: str = 'grpc_asyncio', request_type=instance.Instance):
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_instance),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance(
+            name='name_value',
+            display_name='display_name_value',
+            state=instance.Instance.State.READY,
+            type_=instance.Instance.Type.PRODUCTION,
+            satisfies_pzs=True,
+        ))
+        response = await client.update_instance(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == instance.Instance()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, instance.Instance)
+    assert response.name == 'name_value'
+    assert response.display_name == 'display_name_value'
+    assert response.state == instance.Instance.State.READY
+    assert response.type_ == instance.Instance.Type.PRODUCTION
+    assert response.satisfies_pzs is True
+
+
+@pytest.mark.asyncio
+async def test_update_instance_async_from_dict():
+    await test_update_instance_async(request_type=dict)
+
+
+def test_update_instance_field_headers():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = instance.Instance()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.update_instance), + '__call__') as call: + call.return_value = instance.Instance() + client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Instance() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_instance), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) + await client.update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.PartialUpdateInstanceRequest, + dict, +]) +def test_partial_update_instance(request_type, transport: str = 'grpc'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_partial_update_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.partial_update_instance), + '__call__') as call: + client.partial_update_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + +@pytest.mark.asyncio +async def test_partial_update_instance_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.PartialUpdateInstanceRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_partial_update_instance_async_from_dict(): + await test_partial_update_instance_async(request_type=dict) + + +def test_partial_update_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + + request.instance.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'instance.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_partial_update_instance_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + + request.instance.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_instance), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.partial_update_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
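+    # (Routing headers travel as gRPC metadata; nested request fields are
+    # flattened into the value, e.g. request.instance.name becomes
+    # 'instance.name=name_value'.)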
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'instance.name=name_value',
+    ) in kw['metadata']
+
+
+def test_partial_update_instance_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.partial_update_instance),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.partial_update_instance(
+            instance=gba_instance.Instance(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].instance
+        mock_val = gba_instance.Instance(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+
+def test_partial_update_instance_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.partial_update_instance(
+            bigtable_instance_admin.PartialUpdateInstanceRequest(),
+            instance=gba_instance.Instance(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+@pytest.mark.asyncio
+async def test_partial_update_instance_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.partial_update_instance),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.partial_update_instance(
+            instance=gba_instance.Instance(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].instance
+        mock_val = gba_instance.Instance(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_partial_update_instance_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.partial_update_instance( + bigtable_instance_admin.PartialUpdateInstanceRequest(), + instance=gba_instance.Instance(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.DeleteInstanceRequest, + dict, +]) +def test_delete_instance(request_type, transport: str = 'grpc'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_instance_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_instance), + '__call__') as call: + client.delete_instance() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + +@pytest.mark.asyncio +async def test_delete_instance_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.DeleteInstanceRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_instance), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_instance(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_instance_async_from_dict(): + await test_delete_instance_async(request_type=dict) + + +def test_delete_instance_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteInstanceRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.delete_instance),
+            '__call__') as call:
+        call.return_value = None
+        client.delete_instance(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_instance_field_headers_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.DeleteInstanceRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_instance),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.delete_instance(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_delete_instance_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_instance),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_instance(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_delete_instance_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_instance(
+            bigtable_instance_admin.DeleteInstanceRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_delete_instance_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_instance),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_instance(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
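+        # (The flattened kwargs are merged into a DeleteInstanceRequest before
+        # the stub is invoked, which is why args[0] is inspected for the name
+        # below.)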
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_instance_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_instance( + bigtable_instance_admin.DeleteInstanceRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.CreateClusterRequest, + dict, +]) +def test_create_cluster(request_type, transport: str = 'grpc'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + client.create_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateClusterRequest() + +@pytest.mark.asyncio +async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.CreateClusterRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.CreateClusterRequest() + + # Establish that the response is the type that we expect. 
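+    # (create_cluster is a long-running operation, so the client wraps the
+    # raw operations_pb2.Operation in an api_core future rather than
+    # returning it directly.)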
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_cluster_async_from_dict(): + await test_create_cluster_async(request_type=dict) + + +def test_create_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateClusterRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.CreateClusterRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_cluster( + parent='parent_value', + cluster_id='cluster_id_value', + cluster=instance.Cluster(name='name_value'), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].cluster_id + mock_val = 'cluster_id_value' + assert arg == mock_val + arg = args[0].cluster + mock_val = instance.Cluster(name='name_value') + assert arg == mock_val + + +def test_create_cluster_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.create_cluster(
+            bigtable_instance_admin.CreateClusterRequest(),
+            parent='parent_value',
+            cluster_id='cluster_id_value',
+            cluster=instance.Cluster(name='name_value'),
+        )
+
+@pytest.mark.asyncio
+async def test_create_cluster_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_cluster(
+            parent='parent_value',
+            cluster_id='cluster_id_value',
+            cluster=instance.Cluster(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].cluster_id
+        mock_val = 'cluster_id_value'
+        assert arg == mock_val
+        arg = args[0].cluster
+        mock_val = instance.Cluster(name='name_value')
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_create_cluster_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_cluster(
+            bigtable_instance_admin.CreateClusterRequest(),
+            parent='parent_value',
+            cluster_id='cluster_id_value',
+            cluster=instance.Cluster(name='name_value'),
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_instance_admin.GetClusterRequest,
+  dict,
+])
+def test_get_cluster(request_type, transport: str = 'grpc'):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = instance.Cluster(
+            name='name_value',
+            location='location_value',
+            state=instance.Cluster.State.READY,
+            serve_nodes=1181,
+            default_storage_type=common.StorageType.SSD,
+        )
+        response = client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.GetClusterRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, instance.Cluster)
+    assert response.name == 'name_value'
+    assert response.location == 'location_value'
+    assert response.state == instance.Cluster.State.READY
+    assert response.serve_nodes == 1181
+    assert response.default_storage_type == common.StorageType.SSD
+
+
+def test_get_cluster_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        client.get_cluster()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.GetClusterRequest()
+
+@pytest.mark.asyncio
+async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.GetClusterRequest):
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster(
+            name='name_value',
+            location='location_value',
+            state=instance.Cluster.State.READY,
+            serve_nodes=1181,
+            default_storage_type=common.StorageType.SSD,
+        ))
+        response = await client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.GetClusterRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, instance.Cluster)
+    assert response.name == 'name_value'
+    assert response.location == 'location_value'
+    assert response.state == instance.Cluster.State.READY
+    assert response.serve_nodes == 1181
+    assert response.default_storage_type == common.StorageType.SSD
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_async_from_dict():
+    await test_get_cluster_async(request_type=dict)
+
+
+def test_get_cluster_field_headers():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.GetClusterRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        call.return_value = instance.Cluster()
+        client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_cluster_field_headers_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.GetClusterRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster())
+        await client.get_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_get_cluster_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = instance.Cluster()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_cluster(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_get_cluster_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_cluster(
+            bigtable_instance_admin.GetClusterRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_cluster(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_cluster_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_cluster(
+            bigtable_instance_admin.GetClusterRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_instance_admin.ListClustersRequest,
+  dict,
+])
+def test_list_clusters(request_type, transport: str = 'grpc'):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_clusters),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_instance_admin.ListClustersResponse(
+            failed_locations=['failed_locations_value'],
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_clusters(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.ListClustersRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response.raw_page is response
+    assert isinstance(response, bigtable_instance_admin.ListClustersResponse)
+    assert response.failed_locations == ['failed_locations_value']
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_clusters_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_clusters),
+            '__call__') as call:
+        client.list_clusters()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.ListClustersRequest()
+
+@pytest.mark.asyncio
+async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.ListClustersRequest):
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_clusters),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListClustersResponse(
+            failed_locations=['failed_locations_value'],
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_clusters(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.ListClustersRequest()
+
+    # Establish that the response is the type that we expect.
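+    # (ListClusters is not a paginated RPC: the raw ListClustersResponse is
+    # returned as-is, matching the raw_page assertion in the sync test above.)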
+ assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + assert response.failed_locations == ['failed_locations_value'] + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_clusters_async_from_dict(): + await test_list_clusters_async(request_type=dict) + + +def test_list_clusters_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListClustersRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + call.return_value = bigtable_instance_admin.ListClustersResponse() + client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_clusters_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListClustersRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListClustersResponse()) + await client.list_clusters(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_clusters_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_clusters), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListClustersResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_clusters( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_clusters_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.list_clusters(
+            bigtable_instance_admin.ListClustersRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_clusters_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_clusters),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListClustersResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_clusters(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_clusters_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_clusters(
+            bigtable_instance_admin.ListClustersRequest(),
+            parent='parent_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  instance.Cluster,
+  dict,
+])
+def test_update_cluster(request_type, transport: str = 'grpc'):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.update_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == instance.Cluster()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_update_cluster_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + client.update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == instance.Cluster() + +@pytest.mark.asyncio +async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=instance.Cluster): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == instance.Cluster() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_cluster_async_from_dict(): + await test_update_cluster_async(request_type=dict) + + +def test_update_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Cluster() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = instance.Cluster() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+ _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.PartialUpdateClusterRequest, + dict, +]) +def test_partial_update_cluster(request_type, transport: str = 'grpc'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_partial_update_cluster_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), + '__call__') as call: + client.partial_update_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + +@pytest.mark.asyncio +async def test_partial_update_cluster_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.PartialUpdateClusterRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_partial_update_cluster_async_from_dict(): + await test_partial_update_cluster_async(request_type=dict) + + +def test_partial_update_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. 
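+    # A minimal sketch of the mechanism under test: the client encodes the
+    # routing fields into a single 'x-goog-request-params' metadata entry via
+    # the gapic_v1.routing_header helper (imported at module scope), so the
+    # expected entry for this request can be computed up front:
+    assert gapic_v1.routing_header.to_grpc_metadata(
+        (('cluster.name', 'name_value'),)
+    ) == ('x-goog-request-params', 'cluster.name=name_value')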
+ request = bigtable_instance_admin.PartialUpdateClusterRequest() + + request.cluster.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'cluster.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_partial_update_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.PartialUpdateClusterRequest() + + request.cluster.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.partial_update_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'cluster.name=name_value', + ) in kw['metadata'] + + +def test_partial_update_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.partial_update_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.partial_update_cluster( + cluster=instance.Cluster(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].cluster + mock_val = instance.Cluster(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + + +def test_partial_update_cluster_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
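+    # The generated client treats the two calling conventions as mutually
+    # exclusive: merging flattened keyword arguments into an explicit request
+    # object could silently overwrite fields, so it raises ValueError instead.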
+    with pytest.raises(ValueError):
+        client.partial_update_cluster(
+            bigtable_instance_admin.PartialUpdateClusterRequest(),
+            cluster=instance.Cluster(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+@pytest.mark.asyncio
+async def test_partial_update_cluster_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.partial_update_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.partial_update_cluster(
+            cluster=instance.Cluster(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].cluster
+        mock_val = instance.Cluster(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_partial_update_cluster_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.partial_update_cluster(
+            bigtable_instance_admin.PartialUpdateClusterRequest(),
+            cluster=instance.Cluster(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_instance_admin.DeleteClusterRequest,
+  dict,
+])
+def test_delete_cluster(request_type, transport: str = 'grpc'):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.delete_cluster(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.DeleteClusterRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_delete_cluster_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + client.delete_cluster() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + +@pytest.mark.asyncio +async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.DeleteClusterRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteClusterRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_cluster_async_from_dict(): + await test_delete_cluster_async(request_type=dict) + + +def test_delete_cluster_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteClusterRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + call.return_value = None + client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_cluster_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteClusterRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_cluster), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_cluster(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_cluster_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
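+    # The flattened 'name' keyword below is copied into
+    # DeleteClusterRequest.name before the transport is invoked; the argument
+    # inspection that follows confirms exactly that.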
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_cluster(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_delete_cluster_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_cluster(
+            bigtable_instance_admin.DeleteClusterRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_delete_cluster_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_cluster),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_cluster(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_delete_cluster_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_cluster(
+            bigtable_instance_admin.DeleteClusterRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_instance_admin.CreateAppProfileRequest,
+  dict,
+])
+def test_create_app_profile(request_type, transport: str = 'grpc'):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_app_profile),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = instance.AppProfile(
+            name='name_value',
+            etag='etag_value',
+            description='description_value',
+            priority=instance.AppProfile.Priority.PRIORITY_LOW,
+        )
+        response = client.create_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.CreateAppProfileRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, instance.AppProfile)
+    assert response.name == 'name_value'
+    assert response.etag == 'etag_value'
+    assert response.description == 'description_value'
+
+
+def test_create_app_profile_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_app_profile),
+            '__call__') as call:
+        client.create_app_profile()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.CreateAppProfileRequest()
+
+@pytest.mark.asyncio
+async def test_create_app_profile_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.CreateAppProfileRequest):
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_app_profile),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile(
+            name='name_value',
+            etag='etag_value',
+            description='description_value',
+        ))
+        response = await client.create_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.CreateAppProfileRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, instance.AppProfile)
+    assert response.name == 'name_value'
+    assert response.etag == 'etag_value'
+    assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_create_app_profile_async_from_dict():
+    await test_create_app_profile_async(request_type=dict)
+
+
+def test_create_app_profile_field_headers():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.CreateAppProfileRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_app_profile),
+            '__call__') as call:
+        call.return_value = instance.AppProfile()
+        client.create_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_app_profile_field_headers_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.CreateAppProfileRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_app_profile),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
+        await client.create_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_create_app_profile_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_app_profile),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = instance.AppProfile()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_app_profile(
+            parent='parent_value',
+            app_profile_id='app_profile_id_value',
+            app_profile=instance.AppProfile(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].app_profile_id
+        mock_val = 'app_profile_id_value'
+        assert arg == mock_val
+        arg = args[0].app_profile
+        mock_val = instance.AppProfile(name='name_value')
+        assert arg == mock_val
+
+
+def test_create_app_profile_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_app_profile(
+            bigtable_instance_admin.CreateAppProfileRequest(),
+            parent='parent_value',
+            app_profile_id='app_profile_id_value',
+            app_profile=instance.AppProfile(name='name_value'),
+        )
+
+@pytest.mark.asyncio
+async def test_create_app_profile_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_app_profile),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_app_profile(
+            parent='parent_value',
+            app_profile_id='app_profile_id_value',
+            app_profile=instance.AppProfile(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + arg = args[0].app_profile_id + mock_val = 'app_profile_id_value' + assert arg == mock_val + arg = args[0].app_profile + mock_val = instance.AppProfile(name='name_value') + assert arg == mock_val + +@pytest.mark.asyncio +async def test_create_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.create_app_profile( + bigtable_instance_admin.CreateAppProfileRequest(), + parent='parent_value', + app_profile_id='app_profile_id_value', + app_profile=instance.AppProfile(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.GetAppProfileRequest, + dict, +]) +def test_get_app_profile(request_type, transport: str = 'grpc'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_app_profile), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = instance.AppProfile( + name='name_value', + etag='etag_value', + description='description_value', + priority=instance.AppProfile.Priority.PRIORITY_LOW, + ) + response = client.get_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + assert response.name == 'name_value' + assert response.etag == 'etag_value' + assert response.description == 'description_value' + + +def test_get_app_profile_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_app_profile), + '__call__') as call: + client.get_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.GetAppProfileRequest() + +@pytest.mark.asyncio +async def test_get_app_profile_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.GetAppProfileRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_app_profile), + '__call__') as call: + # Designate an appropriate return value for the call. 
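+        # grpc_helpers_async.FakeUnaryUnaryCall wraps the designated response
+        # in an awaitable test double, so the mocked stub behaves like a real
+        # async gRPC call when the client awaits it.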
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile(
+            name='name_value',
+            etag='etag_value',
+            description='description_value',
+        ))
+        response = await client.get_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.GetAppProfileRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, instance.AppProfile)
+    assert response.name == 'name_value'
+    assert response.etag == 'etag_value'
+    assert response.description == 'description_value'
+
+
+@pytest.mark.asyncio
+async def test_get_app_profile_async_from_dict():
+    await test_get_app_profile_async(request_type=dict)
+
+
+def test_get_app_profile_field_headers():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.GetAppProfileRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_app_profile),
+            '__call__') as call:
+        call.return_value = instance.AppProfile()
+        client.get_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_app_profile_field_headers_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.GetAppProfileRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_app_profile),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
+        await client.get_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_get_app_profile_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_app_profile),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = instance.AppProfile()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_app_profile(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_get_app_profile_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_app_profile(
+            bigtable_instance_admin.GetAppProfileRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_app_profile_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_app_profile),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_app_profile(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_app_profile_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_app_profile(
+            bigtable_instance_admin.GetAppProfileRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_instance_admin.ListAppProfilesRequest,
+  dict,
+])
+def test_list_app_profiles(request_type, transport: str = 'grpc'):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_instance_admin.ListAppProfilesResponse(
+            next_page_token='next_page_token_value',
+            failed_locations=['failed_locations_value'],
+        )
+        response = client.list_app_profiles(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.ListAppProfilesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListAppProfilesPager)
+    assert response.next_page_token == 'next_page_token_value'
+    assert response.failed_locations == ['failed_locations_value']
+
+
+def test_list_app_profiles_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
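+    # With no arguments at all, the client falls back to constructing a
+    # default ListAppProfilesRequest() and sending that, which is what the
+    # final assertion of this test pins down.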
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__') as call:
+        client.list_app_profiles()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.ListAppProfilesRequest()
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.ListAppProfilesRequest):
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListAppProfilesResponse(
+            next_page_token='next_page_token_value',
+            failed_locations=['failed_locations_value'],
+        ))
+        response = await client.list_app_profiles(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.ListAppProfilesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListAppProfilesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+    assert response.failed_locations == ['failed_locations_value']
+
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_async_from_dict():
+    await test_list_app_profiles_async(request_type=dict)
+
+
+def test_list_app_profiles_field_headers():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.ListAppProfilesRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__') as call:
+        call.return_value = bigtable_instance_admin.ListAppProfilesResponse()
+        client.list_app_profiles(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_field_headers_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_instance_admin.ListAppProfilesRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListAppProfilesResponse())
+        await client.list_app_profiles(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_list_app_profiles_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_instance_admin.ListAppProfilesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_app_profiles(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+
+def test_list_app_profiles_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_app_profiles(
+            bigtable_instance_admin.ListAppProfilesRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListAppProfilesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_app_profiles(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_flattened_error_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_app_profiles(
+            bigtable_instance_admin.ListAppProfilesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_app_profiles_pager(transport_name: str = "grpc"):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
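+    # list_app_profiles is invoked with a bare request dict below, so the
+    # routing header is built from an empty 'parent'; the pager records the
+    # metadata it will resend on each page request in pager._metadata.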
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[],
+                next_page_token='def',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_app_profiles(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, instance.AppProfile)
+                   for i in results)
+
+
+def test_list_app_profiles_pages(transport_name: str = "grpc"):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[],
+                next_page_token='def',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_app_profiles(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_async_pager():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
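+        # Each element of side_effect is consumed by one RPC in turn: the
+        # pager keeps fetching while next_page_token is non-empty, and the
+        # trailing RuntimeError would surface if it ever over-fetched.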
+        call.side_effect = (
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[],
+                next_page_token='def',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_app_profiles(request={})
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager:  # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, instance.AppProfile)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_app_profiles_async_pages():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_app_profiles),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[],
+                next_page_token='def',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_instance_admin.ListAppProfilesResponse(
+                app_profiles=[
+                    instance.AppProfile(),
+                    instance.AppProfile(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in (  # pragma: no branch
+            await client.list_app_profiles(request={})
+        ).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_instance_admin.UpdateAppProfileRequest,
+  dict,
+])
+def test_update_app_profile(request_type, transport: str = 'grpc'):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_app_profile),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.update_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest()
+
+    # Establish that the response is the type that we expect.
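+    # update_app_profile is a long-running operation, so the client hands
+    # back a google.api_core future rather than the finished AppProfile; a
+    # real caller would typically block on response.result().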
+ assert isinstance(response, future.Future) + + +def test_update_app_profile_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), + '__call__') as call: + client.update_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + +@pytest.mark.asyncio +async def test_update_app_profile_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.UpdateAppProfileRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_app_profile_async_from_dict(): + await test_update_app_profile_async(request_type=dict) + + +def test_update_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateAppProfileRequest() + + request.app_profile.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_app_profile), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'app_profile.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.UpdateAppProfileRequest() + + request.app_profile.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.update_app_profile),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.update_app_profile(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'app_profile.name=name_value',
+    ) in kw['metadata']
+
+
+def test_update_app_profile_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_app_profile),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_app_profile(
+            app_profile=instance.AppProfile(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].app_profile
+        mock_val = instance.AppProfile(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+
+def test_update_app_profile_flattened_error():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_app_profile(
+            bigtable_instance_admin.UpdateAppProfileRequest(),
+            app_profile=instance.AppProfile(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+@pytest.mark.asyncio
+async def test_update_app_profile_flattened_async():
+    client = BigtableInstanceAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_app_profile),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_app_profile(
+            app_profile=instance.AppProfile(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].app_profile + mock_val = instance.AppProfile(name='name_value') + assert arg == mock_val + arg = args[0].update_mask + mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) + assert arg == mock_val + +@pytest.mark.asyncio +async def test_update_app_profile_flattened_error_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.update_app_profile( + bigtable_instance_admin.UpdateAppProfileRequest(), + app_profile=instance.AppProfile(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.DeleteAppProfileRequest, + dict, +]) +def test_delete_app_profile(request_type, transport: str = 'grpc'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_app_profile_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), + '__call__') as call: + client.delete_app_profile() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + +@pytest.mark.asyncio +async def test_delete_app_profile_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.DeleteAppProfileRequest): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. 
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_app_profile_async_from_dict(): + await test_delete_app_profile_async(request_type=dict) + + +def test_delete_app_profile_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteAppProfileRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), + '__call__') as call: + call.return_value = None + client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_delete_app_profile_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.DeleteAppProfileRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.delete_app_profile(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_delete_app_profile_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_app_profile), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.delete_app_profile( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_delete_app_profile_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.delete_app_profile(
+ bigtable_instance_admin.DeleteAppProfileRequest(),
+ name='name_value',
+ )
+
+@pytest.mark.asyncio
+async def test_delete_app_profile_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.delete_app_profile),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.delete_app_profile(
+ name='name_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].name
+ mock_val = 'name_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_delete_app_profile_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.delete_app_profile(
+ bigtable_instance_admin.DeleteAppProfileRequest(),
+ name='name_value',
+ )
+
+
+@pytest.mark.parametrize("request_type", [
+ iam_policy_pb2.GetIamPolicyRequest,
+ dict,
+])
+def test_get_iam_policy(request_type, transport: str = 'grpc'):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b'etag_blob',
+ )
+ response = client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+ assert response.version == 774
+ assert response.etag == b'etag_blob'
+
+
+def test_get_iam_policy_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy),
+ '__call__') as call:
+ client.get_iam_policy()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.GetIamPolicyRequest):
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy(
+ version=774,
+ etag=b'etag_blob',
+ ))
+ response = await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+ assert response.version == 774
+ assert response.etag == b'etag_blob'
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async_from_dict():
+ await test_get_iam_policy_async(request_type=dict)
+
+
+def test_get_iam_policy_field_headers():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ request.resource = 'resource_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy),
+ '__call__') as call:
+ call.return_value = policy_pb2.Policy()
+ client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'resource=resource_value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_field_headers_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.GetIamPolicyRequest()
+
+ request.resource = 'resource_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+ await client.get_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
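+ # The client is expected to derive an x-goog-request-params entry from
+ # the request's resource field and attach it to the call metadata; the
+ # tuple membership check below verifies that routing header.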
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'resource=resource_value',
+ ) in kw['metadata']
+
+def test_get_iam_policy_from_dict_foreign():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+ response = client.get_iam_policy(request={
+ 'resource': 'resource_value',
+ 'options': options_pb2.GetPolicyOptions(requested_policy_version=2598),
+ }
+ )
+ call.assert_called()
+
+
+def test_get_iam_policy_flattened():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.get_iam_policy(
+ resource='resource_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].resource
+ mock_val = 'resource_value'
+ assert arg == mock_val
+
+
+def test_get_iam_policy_flattened_error():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.get_iam_policy(
+ iam_policy_pb2.GetIamPolicyRequest(),
+ resource='resource_value',
+ )
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.get_iam_policy),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.get_iam_policy(
+ resource='resource_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].resource
+ mock_val = 'resource_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.get_iam_policy(
+ iam_policy_pb2.GetIamPolicyRequest(),
+ resource='resource_value',
+ )
+
+
+@pytest.mark.parametrize("request_type", [
+ iam_policy_pb2.SetIamPolicyRequest,
+ dict,
+])
+def test_set_iam_policy(request_type, transport: str = 'grpc'):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_iam_policy),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = policy_pb2.Policy(
+ version=774,
+ etag=b'etag_blob',
+ )
+ response = client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+ assert response.version == 774
+ assert response.etag == b'etag_blob'
+
+
+def test_set_iam_policy_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_iam_policy),
+ '__call__') as call:
+ client.set_iam_policy()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.SetIamPolicyRequest):
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_iam_policy),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy(
+ version=774,
+ etag=b'etag_blob',
+ ))
+ response = await client.set_iam_policy(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy)
+ assert response.version == 774
+ assert response.etag == b'etag_blob'
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async_from_dict():
+ await test_set_iam_policy_async(request_type=dict)
+
+
+def test_set_iam_policy_field_headers():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.SetIamPolicyRequest() + + request.resource = 'resource_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_iam_policy), + '__call__') as call: + call.return_value = policy_pb2.Policy() + client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'resource=resource_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_set_iam_policy_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.SetIamPolicyRequest() + + request.resource = 'resource_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_iam_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + await client.set_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'resource=resource_value', + ) in kw['metadata'] + +def test_set_iam_policy_from_dict_foreign(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_iam_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + response = client.set_iam_policy(request={ + 'resource': 'resource_value', + 'policy': policy_pb2.Policy(version=774), + 'update_mask': field_mask_pb2.FieldMask(paths=['paths_value']), + } + ) + call.assert_called() + + +def test_set_iam_policy_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.set_iam_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.set_iam_policy( + resource='resource_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].resource + mock_val = 'resource_value' + assert arg == mock_val + + +def test_set_iam_policy_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.set_iam_policy(
+ iam_policy_pb2.SetIamPolicyRequest(),
+ resource='resource_value',
+ )
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.set_iam_policy),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.set_iam_policy(
+ resource='resource_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].resource
+ mock_val = 'resource_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.set_iam_policy(
+ iam_policy_pb2.SetIamPolicyRequest(),
+ resource='resource_value',
+ )
+
+
+@pytest.mark.parametrize("request_type", [
+ iam_policy_pb2.TestIamPermissionsRequest,
+ dict,
+])
+def test_test_iam_permissions(request_type, transport: str = 'grpc'):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=['permissions_value'],
+ )
+ response = client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+ assert response.permissions == ['permissions_value']
+
+
+def test_test_iam_permissions_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions),
+ '__call__') as call:
+ client.test_iam_permissions()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.TestIamPermissionsRequest):
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse(
+ permissions=['permissions_value'],
+ ))
+ response = await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+ assert response.permissions == ['permissions_value']
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async_from_dict():
+ await test_test_iam_permissions_async(request_type=dict)
+
+
+def test_test_iam_permissions_field_headers():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ request.resource = 'resource_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions),
+ '__call__') as call:
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+ client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'resource=resource_value',
+ ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_field_headers_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Any value that is part of the HTTP/1.1 URI should be sent as
+ # a field header. Set these to a non-empty value.
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+
+ request.resource = 'resource_value'
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions),
+ '__call__') as call:
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse())
+ await client.test_iam_permissions(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == request
+
+ # Establish that the field header was sent.
+ _, _, kw = call.mock_calls[0]
+ assert (
+ 'x-goog-request-params',
+ 'resource=resource_value',
+ ) in kw['metadata']
+
+def test_test_iam_permissions_from_dict_foreign():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+ response = client.test_iam_permissions(request={
+ 'resource': 'resource_value',
+ 'permissions': ['permissions_value'],
+ }
+ )
+ call.assert_called()
+
+
+def test_test_iam_permissions_flattened():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ client.test_iam_permissions(
+ resource='resource_value',
+ permissions=['permissions_value'],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].resource
+ mock_val = 'resource_value'
+ assert arg == mock_val
+ arg = args[0].permissions
+ mock_val = ['permissions_value']
+ assert arg == mock_val
+
+
+def test_test_iam_permissions_flattened_error():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ client.test_iam_permissions(
+ iam_policy_pb2.TestIamPermissionsRequest(),
+ resource='resource_value',
+ permissions=['permissions_value'],
+ )
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.test_iam_permissions),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.test_iam_permissions(
+ resource='resource_value',
+ permissions=['permissions_value'],
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].resource
+ mock_val = 'resource_value'
+ assert arg == mock_val
+ arg = args[0].permissions
+ mock_val = ['permissions_value']
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.test_iam_permissions(
+ iam_policy_pb2.TestIamPermissionsRequest(),
+ resource='resource_value',
+ permissions=['permissions_value'],
+ )
+
+
+@pytest.mark.parametrize("request_type", [
+ bigtable_instance_admin.ListHotTabletsRequest,
+ dict,
+])
+def test_list_hot_tablets(request_type, transport: str = 'grpc'):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hot_tablets),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = bigtable_instance_admin.ListHotTabletsResponse(
+ next_page_token='next_page_token_value',
+ )
+ response = client.list_hot_tablets(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls) == 1
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_instance_admin.ListHotTabletsRequest()
+
+ # Establish that the response is the type that we expect.
+ assert isinstance(response, pagers.ListHotTabletsPager)
+ assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_hot_tablets_empty_call():
+ # This test is a coverage failsafe to make sure that totally empty calls,
+ # i.e. request == None and no flattened fields passed, work.
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc',
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hot_tablets),
+ '__call__') as call:
+ client.list_hot_tablets()
+ call.assert_called()
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_instance_admin.ListHotTabletsRequest()
+
+@pytest.mark.asyncio
+async def test_list_hot_tablets_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.ListHotTabletsRequest):
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # Everything is optional in proto3 as far as the runtime is concerned,
+ # and we are mocking out the actual API, so just send an empty request.
+ request = request_type()
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hot_tablets),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListHotTabletsResponse(
+ next_page_token='next_page_token_value',
+ ))
+ response = await client.list_hot_tablets(request)
+
+ # Establish that the underlying gRPC stub method was called.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ assert args[0] == bigtable_instance_admin.ListHotTabletsRequest()
+
+ # Establish that the response is the type that we expect.
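+ # List RPCs are returned wrapped in a pager that lazily fetches any
+ # subsequent pages; fields of the first raw response, such as
+ # next_page_token, are surfaced through the pager itself.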
+ assert isinstance(response, pagers.ListHotTabletsAsyncPager) + assert response.next_page_token == 'next_page_token_value' + + +@pytest.mark.asyncio +async def test_list_hot_tablets_async_from_dict(): + await test_list_hot_tablets_async(request_type=dict) + + +def test_list_hot_tablets_field_headers(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListHotTabletsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hot_tablets), + '__call__') as call: + call.return_value = bigtable_instance_admin.ListHotTabletsResponse() + client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_list_hot_tablets_field_headers_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_instance_admin.ListHotTabletsRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hot_tablets), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListHotTabletsResponse()) + await client.list_hot_tablets(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_list_hot_tablets_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.list_hot_tablets), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_instance_admin.ListHotTabletsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.list_hot_tablets( + parent='parent_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = 'parent_value' + assert arg == mock_val + + +def test_list_hot_tablets_flattened_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError):
+ client.list_hot_tablets(
+ bigtable_instance_admin.ListHotTabletsRequest(),
+ parent='parent_value',
+ )
+
+@pytest.mark.asyncio
+async def test_list_hot_tablets_flattened_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hot_tablets),
+ '__call__') as call:
+ # Designate an appropriate return value for the call.
+ call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListHotTabletsResponse())
+ # Call the method with a truthy value for each flattened field,
+ # using the keyword arguments to the method.
+ response = await client.list_hot_tablets(
+ parent='parent_value',
+ )
+
+ # Establish that the underlying call was made with the expected
+ # request object values.
+ assert len(call.mock_calls)
+ _, args, _ = call.mock_calls[0]
+ arg = args[0].parent
+ mock_val = 'parent_value'
+ assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_hot_tablets_flattened_error_async():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Attempting to call a method with both a request object and flattened
+ # fields is an error.
+ with pytest.raises(ValueError):
+ await client.list_hot_tablets(
+ bigtable_instance_admin.ListHotTabletsRequest(),
+ parent='parent_value',
+ )
+
+
+def test_list_hot_tablets_pager(transport_name: str = "grpc"):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hot_tablets),
+ '__call__') as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[
+ instance.HotTablet(),
+ instance.HotTablet(),
+ instance.HotTablet(),
+ ],
+ next_page_token='abc',
+ ),
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[],
+ next_page_token='def',
+ ),
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[
+ instance.HotTablet(),
+ ],
+ next_page_token='ghi',
+ ),
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[
+ instance.HotTablet(),
+ instance.HotTablet(),
+ ],
+ ),
+ RuntimeError,
+ )
+
+ metadata = ()
+ metadata = tuple(metadata) + (
+ gapic_v1.routing_header.to_grpc_metadata((
+ ('parent', ''),
+ )),
+ )
+ pager = client.list_hot_tablets(request={})
+
+ assert pager._metadata == metadata
+
+ results = list(pager)
+ assert len(results) == 6
+ assert all(isinstance(i, instance.HotTablet)
+ for i in results)
+
+
+def test_list_hot_tablets_pages(transport_name: str = "grpc"):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport_name,
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hot_tablets),
+ '__call__') as call:
+ # Set the response to a series of pages.
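+ # Passing a sequence as side_effect makes the mock return each response
+ # in turn; the trailing RuntimeError guards against the pager requesting
+ # more pages than the test supplies.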
+ call.side_effect = (
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[
+ instance.HotTablet(),
+ instance.HotTablet(),
+ instance.HotTablet(),
+ ],
+ next_page_token='abc',
+ ),
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[],
+ next_page_token='def',
+ ),
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[
+ instance.HotTablet(),
+ ],
+ next_page_token='ghi',
+ ),
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[
+ instance.HotTablet(),
+ instance.HotTablet(),
+ ],
+ ),
+ RuntimeError,
+ )
+ pages = list(client.list_hot_tablets(request={}).pages)
+ for page_, token in zip(pages, ['abc','def','ghi', '']):
+ assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_hot_tablets_async_pager():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hot_tablets),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = (
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[
+ instance.HotTablet(),
+ instance.HotTablet(),
+ instance.HotTablet(),
+ ],
+ next_page_token='abc',
+ ),
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[],
+ next_page_token='def',
+ ),
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[
+ instance.HotTablet(),
+ ],
+ next_page_token='ghi',
+ ),
+ bigtable_instance_admin.ListHotTabletsResponse(
+ hot_tablets=[
+ instance.HotTablet(),
+ instance.HotTablet(),
+ ],
+ ),
+ RuntimeError,
+ )
+ async_pager = await client.list_hot_tablets(request={},)
+ assert async_pager.next_page_token == 'abc'
+ responses = []
+ async for response in async_pager: # pragma: no branch
+ responses.append(response)
+
+ assert len(responses) == 6
+ assert all(isinstance(i, instance.HotTablet)
+ for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_hot_tablets_async_pages():
+ client = BigtableInstanceAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ )
+
+ # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object(
+ type(client.transport.list_hot_tablets),
+ '__call__', new_callable=mock.AsyncMock) as call:
+ # Set the response to a series of pages.
+ call.side_effect = ( + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + instance.HotTablet(), + ], + next_page_token='abc', + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[], + next_page_token='def', + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + ], + next_page_token='ghi', + ), + bigtable_instance_admin.ListHotTabletsResponse( + hot_tablets=[ + instance.HotTablet(), + instance.HotTablet(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_hot_tablets(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.CreateInstanceRequest, + dict, +]) +def test_create_instance_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_instance(request) + + # Establish that the response is the type that we expect. 
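+ # create_instance is a long-running operation, so the client wraps the
+ # raw operations_pb2.Operation in an api_core operation object; the
+ # underlying proto remains reachable via response.operation.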
+ assert response.operation.name == "operations/spam" + + +def test_create_instance_rest_required_fields(request_type=bigtable_instance_admin.CreateInstanceRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["instance_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + jsonified_request["instanceId"] = 'instance_id_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "instanceId" in jsonified_request + assert jsonified_request["instanceId"] == 'instance_id_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
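+ # The mocked transcode() below supplies a synthetic URI and method, so
+ # the test exercises only parameter serialization; the sole expected
+ # query parameter is the default $alt=json;enum-encoding=int.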
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "post",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.create_instance(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_create_instance_rest_unset_required_fields():
+ transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.create_instance._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("parent", "instanceId", "instance", "clusters", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_instance_rest_interceptors(null_interceptor):
+ transport = transports.BigtableInstanceAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+ )
+ client = BigtableInstanceAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+ mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_create_instance") as post, \
+ mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = bigtable_instance_admin.CreateInstanceRequest.pb(bigtable_instance_admin.CreateInstanceRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+ request = bigtable_instance_admin.CreateInstanceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.create_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_create_instance_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.CreateInstanceRequest):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'parent': 'projects/sample1'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
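+ # The transport translates a mocked 400 status into
+ # core_exceptions.BadRequest, which the test expects below.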
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_instance(request) + + +def test_create_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + instance_id='instance_id_value', + instance=gba_instance.Instance(name='name_value'), + clusters={'key_value': gba_instance.Cluster(name='name_value')}, + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1]) + + +def test_create_instance_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_instance( + bigtable_instance_admin.CreateInstanceRequest(), + parent='parent_value', + instance_id='instance_id_value', + instance=gba_instance.Instance(name='name_value'), + clusters={'key_value': gba_instance.Cluster(name='name_value')}, + ) + + +def test_create_instance_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.GetInstanceRequest, + dict, +]) +def test_get_instance_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = instance.Instance( + name='name_value', + display_name='display_name_value', + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_instance(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Instance) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + + +def test_get_instance_rest_required_fields(request_type=bigtable_instance_admin.GetInstanceRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.Instance() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "get",
+ 'query_params': pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = instance.Instance.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.get_instance(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_get_instance_rest_unset_required_fields():
+ transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.get_instance._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_instance_rest_interceptors(null_interceptor):
+ transport = transports.BigtableInstanceAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+ )
+ client = BigtableInstanceAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_get_instance") as post, \
+ mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = bigtable_instance_admin.GetInstanceRequest.pb(bigtable_instance_admin.GetInstanceRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = instance.Instance.to_json(instance.Instance())
+
+ request = bigtable_instance_admin.GetInstanceRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = instance.Instance()
+
+ client.get_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_instance_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.GetInstanceRequest):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'name': 'projects/sample1/instances/sample2'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_instance(request) + + +def test_get_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = instance.Instance() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1]) + + +def test_get_instance_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_instance( + bigtable_instance_admin.GetInstanceRequest(), + name='name_value', + ) + + +def test_get_instance_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.ListInstancesRequest, + dict, +]) +def test_list_instances_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse( + failed_locations=['failed_locations_value'], + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_instances(request) + + assert response.raw_page is response + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) + assert response.failed_locations == ['failed_locations_value'] + assert response.next_page_token == 'next_page_token_value' + + +def test_list_instances_rest_required_fields(request_type=bigtable_instance_admin.ListInstancesRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_instances._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_instances._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
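# For context on the stub being built below: against the real helper,
# transcode() returns a dict of verb, expanded URI, optional body, and the
# leftover fields as query params. A sketch with illustrative http_options
# (not this method's actual bindings):
from google.api_core import path_template

result = path_template.transcode(
    [{"method": "get", "uri": "/v2/{parent=projects/*}/instances"}],
    parent="projects/sample1",
)
assert result["method"] == "get"
assert result["uri"] == "/v2/projects/sample1/instances"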
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.list_instances(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_list_instances_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.list_instances._get_unset_required_fields({}) + assert set(unset_fields) == (set(("pageToken", )) & set(("parent", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_list_instances_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_list_instances") as post, \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.ListInstancesRequest.pb(bigtable_instance_admin.ListInstancesRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = bigtable_instance_admin.ListInstancesResponse.to_json(bigtable_instance_admin.ListInstancesResponse()) + + request = bigtable_instance_admin.ListInstancesRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = bigtable_instance_admin.ListInstancesResponse() + + client.list_instances(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_list_instances_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.ListInstancesRequest): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
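# The set algebra in the unset-required-fields assertions above, spelled out:
# (fields the transport pre-populates in the query string) intersected with
# (fields the proto marks required). For list_instances that intersection is
# empty, since "parent" travels in the URI path, not the query string.
query_string_defaults = {"pageToken"}
required_fields = {"parent"}
assert query_string_defaults & required_fields == set()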
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_instances(request) + + +def test_list_instances_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListInstancesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_instances(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1]) + + +def test_list_instances_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_instances( + bigtable_instance_admin.ListInstancesRequest(), + parent='parent_value', + ) + + +def test_list_instances_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + instance.Instance, + dict, +]) +def test_update_instance_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
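# The flattened tests above check the final URL against the http-rule
# template with path_template.validate; a '*' segment matches exactly one
# path component. The host below is illustrative.
from google.api_core import path_template

host = "https://bigtableadmin.googleapis.com"
assert path_template.validate(
    "%s/v2/{parent=projects/*}/instances" % host,
    "%s/v2/projects/sample1/instances" % host,
)
assert not path_template.validate(
    "%s/v2/{parent=projects/*}/instances" % host,
    "%s/v2/projects/a/b/instances" % host,  # two segments, so no match
)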
+ return_value = instance.Instance( + name='name_value', + display_name='display_name_value', + state=instance.Instance.State.READY, + type_=instance.Instance.Type.PRODUCTION, + satisfies_pzs=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_instance(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Instance) + assert response.name == 'name_value' + assert response.display_name == 'display_name_value' + assert response.state == instance.Instance.State.READY + assert response.type_ == instance.Instance.Type.PRODUCTION + assert response.satisfies_pzs is True + + +def test_update_instance_rest_required_fields(request_type=instance.Instance): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["display_name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["displayName"] = 'display_name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "displayName" in jsonified_request + assert jsonified_request["displayName"] == 'display_name_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.Instance() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
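# Why the tests call Instance.pb(...) before json_format: proto-plus classes
# wrap the generated protobuf types, and json_format only understands the raw
# protobuf. A sketch with an arbitrary value:
from google.protobuf import json_format
from google.cloud.bigtable_admin_v2.types import instance

wrapped = instance.Instance(display_name="prod")
raw = instance.Instance.pb(wrapped)  # the underlying protobuf message
assert json_format.MessageToDict(raw)["displayName"] == "prod"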
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "put", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.update_instance(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_update_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.update_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("displayName", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_update_instance") as post, \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_update_instance") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = instance.Instance.pb(instance.Instance()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = instance.Instance.to_json(instance.Instance()) + + request = instance.Instance() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Instance() + + client.update_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_instance_rest_bad_request(transport: str = 'rest', request_type=instance.Instance): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
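# The interceptor tests above drive the hooks through mocks. In application
# code the same hooks are implemented by subclassing the generated
# interceptor; a sketch, with hook signatures inferred from the mocks above:
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports

class LoggingInterceptor(transports.BigtableInstanceAdminRestInterceptor):
    def pre_update_instance(self, request, metadata):
        # Runs before transcoding; may rewrite the request and metadata.
        return request, metadata

    def post_update_instance(self, response):
        # Runs after deserialization; may replace the response.
        return response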
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ client.update_instance(request)
+
+
+def test_update_instance_rest_error():
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='rest'
+ )
+
+
+@pytest.mark.parametrize("request_type", [
+ bigtable_instance_admin.PartialUpdateInstanceRequest,
+ dict,
+])
+def test_partial_update_instance_rest(request_type):
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'instance': {'name': 'projects/sample1/instances/sample2'}}
+ request_init["instance"] = {'name': 'projects/sample1/instances/sample2', 'display_name': 'display_name_value', 'state': 1, 'type_': 1, 'labels': {}, 'create_time': {'seconds': 751, 'nanos': 543}, 'satisfies_pzs': True}
+ # The version of a generated dependency at test runtime may differ from the version used during generation.
+ # Delete any fields which are not present in the current runtime dependency
+ # See https://github.com/googleapis/gapic-generator-python/issues/1748
+
+ # Determine if the message type is proto-plus or protobuf
+ test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields["instance"]
+
+ def get_message_fields(field):
+ # Given a field which is a message (composite type), return a list with
+ # all the fields of the message.
+ # If the field is not a composite type, return an empty list.
+ message_fields = []
+
+ if hasattr(field, "message") and field.message:
+ is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR")
+
+ if is_field_type_proto_plus_type:
+ message_fields = field.message.meta.fields.values()
+ # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types
+ else: # pragma: NO COVER
+ message_fields = field.message.DESCRIPTOR.fields
+ return message_fields
+
+ runtime_nested_fields = [
+ (field.name, nested_field.name)
+ for field in get_message_fields(test_field)
+ for nested_field in get_message_fields(field)
+ ]
+
+ subfields_not_in_runtime = []
+
+ # For each item in the sample request, create a list of sub fields which are not present at runtime
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for field, value in request_init["instance"].items(): # pragma: NO COVER
+ result = None
+ is_repeated = False
+ # For repeated fields
+ if isinstance(value, list) and len(value):
+ is_repeated = True
+ result = value[0]
+ # For fields where the type is another message
+ if isinstance(value, dict):
+ result = value
+
+ if result and hasattr(result, "keys"):
+ for subfield in result.keys():
+ if (field, subfield) not in runtime_nested_fields:
+ subfields_not_in_runtime.append(
+ {"field": field, "subfield": subfield, "is_repeated": is_repeated}
+ )
+
+ # Remove fields from the sample request which are not present in the runtime version of the dependency
+ # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime
+ for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER
+ field = subfield_to_delete.get("field")
+ field_repeated = subfield_to_delete.get("is_repeated")
+ subfield = subfield_to_delete.get("subfield")
+ if subfield:
+ if field_repeated:
+ for i in range(0, len(request_init["instance"][field])):
+ del request_init["instance"][field][i][subfield]
+ else:
+ del request_init["instance"][field][subfield]
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req:
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name='operations/spam')
+
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+ response = client.partial_update_instance(request)
+
+ # Establish that the response is the type that we expect.
+ assert response.operation.name == "operations/spam"
+
+
+def test_partial_update_instance_rest_required_fields(request_type=bigtable_instance_admin.PartialUpdateInstanceRequest):
+ transport_class = transports.BigtableInstanceAdminRestTransport
+
+ request_init = {}
+ request = request_type(**request_init)
+ pb_request = request_type.pb(request)
+ jsonified_request = json.loads(json_format.MessageToJson(
+ pb_request,
+ including_default_value_fields=False,
+ use_integers_for_enums=False
+ ))
+
+ # verify fields with default values are dropped
+
+ unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).partial_update_instance._get_unset_required_fields(jsonified_request)
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with default values are now present
+
+ unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).partial_update_instance._get_unset_required_fields(jsonified_request)
+ # Check that path parameters and body parameters are not mixing in.
+ assert not set(unset_fields) - set(("update_mask", ))
+ jsonified_request.update(unset_fields)
+
+ # verify required fields with non-default values are left alone
+
+ client = BigtableInstanceAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='rest',
+ )
+ request = request_type(**request_init)
+
+ # Designate an appropriate value for the returned response.
+ return_value = operations_pb2.Operation(name='operations/spam')
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(Session, 'request') as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, 'transcode') as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
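# The runtime-subfield pruning in test_partial_update_instance_rest above,
# reduced to plain dicts (field names arbitrary): subfields the installed
# runtime does not know about are deleted from the sample request before the
# request object is built.
runtime_nested_fields = {("create_time", "seconds"), ("create_time", "nanos")}
sample = {"create_time": {"seconds": 751, "nanos": 543, "not_in_runtime": 1}}
for field, value in list(sample.items()):
    if isinstance(value, dict):
        for sub in [s for s in value if (field, s) not in runtime_nested_fields]:
            del value[sub]
assert sample == {"create_time": {"seconds": 751, "nanos": 543}}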
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "patch", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.partial_update_instance(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_partial_update_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.partial_update_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask", )) & set(("instance", "updateMask", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_partial_update_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_instance") as post, \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_instance") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.PartialUpdateInstanceRequest.pb(bigtable_instance_admin.PartialUpdateInstanceRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = bigtable_instance_admin.PartialUpdateInstanceRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.partial_update_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_partial_update_instance_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.PartialUpdateInstanceRequest): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'instance': {'name': 'projects/sample1/instances/sample2'}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
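# partial_update_instance is long-running: the wire payload is a raw
# operations_pb2.Operation, which the client wraps so that .operation.name is
# observable before completion (hence the patch of _set_result_from_operation
# above, so the test never has to resolve the result). Constructing the raw
# message:
from google.longrunning import operations_pb2

raw = operations_pb2.Operation(name="operations/spam")
assert raw.name == "operations/spam"
assert not raw.done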
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.partial_update_instance(request) + + +def test_partial_update_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'instance': {'name': 'projects/sample1/instances/sample2'}} + + # get truthy value for each flattened field + mock_args = dict( + instance=gba_instance.Instance(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.partial_update_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{instance.name=projects/*/instances/*}" % client.transport._host, args[1]) + + +def test_partial_update_instance_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.partial_update_instance( + bigtable_instance_admin.PartialUpdateInstanceRequest(), + instance=gba_instance.Instance(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_partial_update_instance_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.DeleteInstanceRequest, + dict, +]) +def test_delete_instance_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_instance(request) + + # Establish that the response is the type that we expect. 
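# The update_mask in the flattened partial-update call above is a standard
# FieldMask: only the named paths are written, everything else is left alone.
# 'paths_value' in the test is a placeholder; real paths name Instance fields:
from google.protobuf import field_mask_pb2

mask = field_mask_pb2.FieldMask(paths=["display_name", "labels"])
assert list(mask.paths) == ["display_name", "labels"]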
+ assert response is None + + +def test_delete_instance_rest_required_fields(request_type=bigtable_instance_admin.DeleteInstanceRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_instance._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
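# How the required-fields tests simulate an unset field: MessageToJson omits
# fields left at their proto3 defaults, so name="" disappears entirely from
# the jsonified request.
from google.protobuf import json_format
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin

req = bigtable_instance_admin.DeleteInstanceRequest(name="")
assert json_format.MessageToJson(
    bigtable_instance_admin.DeleteInstanceRequest.pb(req)) == "{}"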
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "delete", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.delete_instance(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_delete_instance_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.delete_instance._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_instance_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance") as pre: + pre.assert_not_called() + pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb(bigtable_instance_admin.DeleteInstanceRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_instance_admin.DeleteInstanceRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + + +def test_delete_instance_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.DeleteInstanceRequest): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_instance(request) + + +def test_delete_instance_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_instance(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1]) + + +def test_delete_instance_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_instance( + bigtable_instance_admin.DeleteInstanceRequest(), + name='name_value', + ) + + +def test_delete_instance_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.CreateClusterRequest, + dict, +]) +def test_create_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2'} + request_init["cluster"] = {'name': 'name_value', 'location': 'location_value', 'state': 1, 'serve_nodes': 1181, 'cluster_config': {'cluster_autoscaling_config': {'autoscaling_limits': {'min_serve_nodes': 1600, 'max_serve_nodes': 1602}, 'autoscaling_targets': {'cpu_utilization_percent': 2483, 'storage_utilization_gib_per_node': 3404}}}, 'default_storage_type': 1, 'encryption_config': {'kms_key_name': 'kms_key_name_value'}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_cluster(request) + + # Establish that the response is the type that we expect. 
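# The proto-plus/protobuf branch in get_message_fields above, demonstrated:
# proto-plus classes carry .meta.fields, raw protobuf messages a DESCRIPTOR.
from google.cloud.bigtable_admin_v2.types import instance

assert hasattr(instance.Cluster, "meta")  # proto-plus wrapper
assert hasattr(instance.Cluster.pb(instance.Cluster()), "DESCRIPTOR")  # raw pb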
+ assert response.operation.name == "operations/spam" + + +def test_create_cluster_rest_required_fields(request_type=bigtable_instance_admin.CreateClusterRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["cluster_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + assert "clusterId" not in jsonified_request + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == request_init["cluster_id"] + + jsonified_request["parent"] = 'parent_value' + jsonified_request["clusterId"] = 'cluster_id_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("cluster_id", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "clusterId" in jsonified_request + assert jsonified_request["clusterId"] == 'cluster_id_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.create_cluster(request) + + expected_params = [ + ( + "clusterId", + "", + ), + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_create_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.create_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(("clusterId", )) & set(("parent", "clusterId", "cluster", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_create_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster") as post, \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.CreateClusterRequest.pb(bigtable_instance_admin.CreateClusterRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = bigtable_instance_admin.CreateClusterRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.create_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_create_cluster_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.CreateClusterRequest): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_cluster(request) + + +def test_create_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + cluster_id='cluster_id_value', + cluster=instance.Cluster(name='name_value'), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, args[1]) + + +def test_create_cluster_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_cluster( + bigtable_instance_admin.CreateClusterRequest(), + parent='parent_value', + cluster_id='cluster_id_value', + cluster=instance.Cluster(name='name_value'), + ) + + +def test_create_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.GetClusterRequest, + dict, +]) +def test_get_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
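# Flattened arguments are sugar: the client assembles the request object
# itself, which is why passing both forms at once raises ValueError in the
# *_flattened_error tests. The two spellings build the same request:
from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin, instance

request = bigtable_instance_admin.CreateClusterRequest(
    parent="projects/sample1/instances/sample2",
    cluster_id="cluster_id_value",
    cluster=instance.Cluster(name="name_value"),
)
assert request.cluster.name == "name_value"
# Equivalent call forms (client construction omitted):
#   client.create_cluster(request=request)
#   client.create_cluster(parent=..., cluster_id=..., cluster=...)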
+ return_value = instance.Cluster( + name='name_value', + location='location_value', + state=instance.Cluster.State.READY, + serve_nodes=1181, + default_storage_type=common.StorageType.SSD, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_cluster(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.Cluster) + assert response.name == 'name_value' + assert response.location == 'location_value' + assert response.state == instance.Cluster.State.READY + assert response.serve_nodes == 1181 + assert response.default_storage_type == common.StorageType.SSD + + +def test_get_cluster_rest_required_fields(request_type=bigtable_instance_admin.GetClusterRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.Cluster() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
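# Why every request pins $alt=json;enum-encoding=int: the server then emits
# integer enum values, which proto-plus maps back onto the members asserted
# above. The integers below match the sample dicts used elsewhere in this
# patch ('state': 1, 'default_storage_type': 1).
from google.cloud.bigtable_admin_v2.types import common, instance

assert common.StorageType.SSD == 1
assert instance.Cluster.State.READY == 1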
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_cluster(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_cluster_rest_unset_required_fields(): + transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_cluster._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_cluster_rest_interceptors(null_interceptor): + transport = transports.BigtableInstanceAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), + ) + client = BigtableInstanceAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster") as post, \ + mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_instance_admin.GetClusterRequest.pb(bigtable_instance_admin.GetClusterRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = instance.Cluster.to_json(instance.Cluster()) + + request = bigtable_instance_admin.GetClusterRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = instance.Cluster() + + client.get_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_cluster_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.GetClusterRequest): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_cluster(request) + + +def test_get_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = instance.Cluster() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, args[1]) + + +def test_get_cluster_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_cluster( + bigtable_instance_admin.GetClusterRequest(), + name='name_value', + ) + + +def test_get_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.ListClustersRequest, + dict, +]) +def test_list_clusters_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_instance_admin.ListClustersResponse( + failed_locations=['failed_locations_value'], + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_clusters(request) + + assert response.raw_page is response + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_instance_admin.ListClustersResponse) + assert response.failed_locations == ['failed_locations_value'] + assert response.next_page_token == 'next_page_token_value' + + +def test_list_clusters_rest_required_fields(request_type=bigtable_instance_admin.ListClustersRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_clusters._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_clusters._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
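# list_clusters is not wrapped in a pager (note `raw_page is response` above),
# so callers follow next_page_token by hand. A hypothetical helper; client and
# parent are supplied by the caller:
def iter_all_clusters(client, parent):
    page_token = ""
    while True:
        resp = client.list_clusters(
            request={"parent": parent, "page_token": page_token})
        yield from resp.clusters
        page_token = resp.next_page_token
        if not page_token:
            return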
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.list_clusters(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_list_clusters_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.list_clusters._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("pageToken", )) & set(("parent", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_clusters_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_instance_admin.ListClustersRequest.pb(bigtable_instance_admin.ListClustersRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable_instance_admin.ListClustersResponse.to_json(bigtable_instance_admin.ListClustersResponse())
+
+        request = bigtable_instance_admin.ListClustersRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable_instance_admin.ListClustersResponse()
+
+        client.list_clusters(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_list_clusters_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.ListClustersRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
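+    # Editor's note: a 400 status on the mocked session is enough here — the
+    # transport maps HTTP error codes onto google.api_core exceptions
+    # (400 -> BadRequest), which is what pytest.raises() catches below.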
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_clusters(request) + + +def test_list_clusters_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListClustersResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_clusters(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, args[1]) + + +def test_list_clusters_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_clusters( + bigtable_instance_admin.ListClustersRequest(), + parent='parent_value', + ) + + +def test_list_clusters_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + instance.Cluster, + dict, +]) +def test_update_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_cluster(request) + + # Establish that the response is the type that we expect. 
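+    # Editor's note: UpdateCluster is a long-running operation, so the client
+    # returns an operation future wrapping the raw operations_pb2.Operation;
+    # the name set on the mock is visible through response.operation below.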
+    assert response.operation.name == "operations/spam"
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_cluster_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = instance.Cluster.pb(instance.Cluster())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = instance.Cluster()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.update_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_update_cluster_rest_bad_request(transport: str = 'rest', request_type=instance.Cluster):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_cluster(request) + + +def test_update_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.PartialUpdateClusterRequest, + dict, +]) +def test_partial_update_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'cluster': {'name': 'projects/sample1/instances/sample2/clusters/sample3'}} + request_init["cluster"] = {'name': 'projects/sample1/instances/sample2/clusters/sample3', 'location': 'location_value', 'state': 1, 'serve_nodes': 1181, 'cluster_config': {'cluster_autoscaling_config': {'autoscaling_limits': {'min_serve_nodes': 1600, 'max_serve_nodes': 1602}, 'autoscaling_targets': {'cpu_utilization_percent': 2483, 'storage_utilization_gib_per_node': 3404}}}, 'default_storage_type': 1, 'encryption_config': {'kms_key_name': 'kms_key_name_value'}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
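+        # Editor's note: proto-plus message classes expose their schema via
+        # `meta.fields`, while raw protobuf classes expose a DESCRIPTOR; the
+        # branch below keys off that difference. E.g. (illustrative, not
+        # executed here):
+        #
+        #     instance.Cluster.meta.fields          # proto-plus
+        #     operations_pb2.Operation.DESCRIPTOR   # protobuf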
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.partial_update_cluster(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_partial_update_cluster_rest_required_fields(request_type=bigtable_instance_admin.PartialUpdateClusterRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).partial_update_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).partial_update_cluster._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
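+            # Editor's note: unlike the GET methods above, PartialUpdateCluster's
+            # http rule carries a request body, so the mocked transcode result
+            # below also sets a 'body' key before being returned.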
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "patch",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.partial_update_cluster(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_partial_update_cluster_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.partial_update_cluster._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("updateMask", )) & set(("cluster", "updateMask", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_partial_update_cluster_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_instance_admin.PartialUpdateClusterRequest.pb(bigtable_instance_admin.PartialUpdateClusterRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = bigtable_instance_admin.PartialUpdateClusterRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.partial_update_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_partial_update_cluster_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.PartialUpdateClusterRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'cluster': {'name': 'projects/sample1/instances/sample2/clusters/sample3'}}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.partial_update_cluster(request) + + +def test_partial_update_cluster_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'cluster': {'name': 'projects/sample1/instances/sample2/clusters/sample3'}} + + # get truthy value for each flattened field + mock_args = dict( + cluster=instance.Cluster(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.partial_update_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{cluster.name=projects/*/instances/*/clusters/*}" % client.transport._host, args[1]) + + +def test_partial_update_cluster_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.partial_update_cluster( + bigtable_instance_admin.PartialUpdateClusterRequest(), + cluster=instance.Cluster(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_partial_update_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.DeleteClusterRequest, + dict, +]) +def test_delete_cluster_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_cluster(request) + + # Establish that the response is the type that we expect. 
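+    # Editor's note: DeleteCluster maps to google.protobuf.Empty, so the client
+    # surfaces it as None; an empty JSON body on the mocked 200 response is
+    # sufficient for the assertion below.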
+ assert response is None + + +def test_delete_cluster_rest_required_fields(request_type=bigtable_instance_admin.DeleteClusterRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_cluster._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "delete",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = ''
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.delete_cluster(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_delete_cluster_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.delete_cluster._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_cluster_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster") as pre:
+        pre.assert_not_called()
+        pb_message = bigtable_instance_admin.DeleteClusterRequest.pb(bigtable_instance_admin.DeleteClusterRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+
+        request = bigtable_instance_admin.DeleteClusterRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+
+        client.delete_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+
+
+def test_delete_cluster_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.DeleteClusterRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete_cluster(request)
+
+
+def test_delete_cluster_rest_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), 'request') as req:
+        # Designate an appropriate value for the returned response.
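+        # Editor's note: the flattened-call tests merge the keyword args into a
+        # sample request that satisfies the http rule, then use
+        # path_template.validate() to confirm the URI the transport actually
+        # built matches that rule.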
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_cluster(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, args[1]) + + +def test_delete_cluster_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_cluster( + bigtable_instance_admin.DeleteClusterRequest(), + name='name_value', + ) + + +def test_delete_cluster_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.CreateAppProfileRequest, + dict, +]) +def test_create_app_profile_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2'} + request_init["app_profile"] = {'name': 'name_value', 'etag': 'etag_value', 'description': 'description_value', 'multi_cluster_routing_use_any': {'cluster_ids': ['cluster_ids_value1', 'cluster_ids_value2']}, 'single_cluster_routing': {'cluster_id': 'cluster_id_value', 'allow_transactional_writes': True}, 'priority': 1, 'standard_isolation': {'priority': 1}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields["app_profile"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile( + name='name_value', + etag='etag_value', + description='description_value', + priority=instance.AppProfile.Priority.PRIORITY_LOW, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_app_profile(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, instance.AppProfile) + assert response.name == 'name_value' + assert response.etag == 'etag_value' + assert response.description == 'description_value' + + +def test_create_app_profile_rest_required_fields(request_type=bigtable_instance_admin.CreateAppProfileRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["app_profile_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + assert "appProfileId" not in jsonified_request + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "appProfileId" in jsonified_request + assert jsonified_request["appProfileId"] == request_init["app_profile_id"] + + jsonified_request["parent"] = 'parent_value' + jsonified_request["appProfileId"] = 'app_profile_id_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("app_profile_id", "ignore_warnings", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "appProfileId" in jsonified_request + assert jsonified_request["appProfileId"] == 'app_profile_id_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
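+            # Editor's note: appProfileId is a required field that is not part
+            # of the URI path, so it is expected to surface as a query
+            # parameter; that is why expected_params below contains
+            # ("appProfileId", "") alongside the system parameter.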
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = instance.AppProfile.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.create_app_profile(request)
+
+            expected_params = [
+                (
+                    "appProfileId",
+                    "",
+                ),
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_create_app_profile_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.create_app_profile._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("appProfileId", "ignoreWarnings", )) & set(("parent", "appProfileId", "appProfile", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_app_profile_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb(bigtable_instance_admin.CreateAppProfileRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = instance.AppProfile.to_json(instance.AppProfile())
+
+        request = bigtable_instance_admin.CreateAppProfileRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = instance.AppProfile()
+
+        client.create_app_profile(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_create_app_profile_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.CreateAppProfileRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_app_profile(request) + + +def test_create_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + app_profile_id='app_profile_id_value', + app_profile=instance.AppProfile(name='name_value'), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/appProfiles" % client.transport._host, args[1]) + + +def test_create_app_profile_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_app_profile( + bigtable_instance_admin.CreateAppProfileRequest(), + parent='parent_value', + app_profile_id='app_profile_id_value', + app_profile=instance.AppProfile(name='name_value'), + ) + + +def test_create_app_profile_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.GetAppProfileRequest, + dict, +]) +def test_get_app_profile_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = instance.AppProfile( + name='name_value', + etag='etag_value', + description='description_value', + priority=instance.AppProfile.Priority.PRIORITY_LOW, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_app_profile(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, instance.AppProfile) + assert response.name == 'name_value' + assert response.etag == 'etag_value' + assert response.description == 'description_value' + + +def test_get_app_profile_rest_required_fields(request_type=bigtable_instance_admin.GetAppProfileRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = instance.AppProfile.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.get_app_profile(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_get_app_profile_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.get_app_profile._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_app_profile_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_instance_admin.GetAppProfileRequest.pb(bigtable_instance_admin.GetAppProfileRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = instance.AppProfile.to_json(instance.AppProfile())
+
+        request = bigtable_instance_admin.GetAppProfileRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = instance.AppProfile()
+
+        client.get_app_profile(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_get_app_profile_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.GetAppProfileRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_app_profile(request) + + +def test_get_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = instance.AppProfile() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/appProfiles/*}" % client.transport._host, args[1]) + + +def test_get_app_profile_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_app_profile( + bigtable_instance_admin.GetAppProfileRequest(), + name='name_value', + ) + + +def test_get_app_profile_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.ListAppProfilesRequest, + dict, +]) +def test_list_app_profiles_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_instance_admin.ListAppProfilesResponse( + next_page_token='next_page_token_value', + failed_locations=['failed_locations_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_app_profiles(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListAppProfilesPager) + assert response.next_page_token == 'next_page_token_value' + assert response.failed_locations == ['failed_locations_value'] + + +def test_list_app_profiles_rest_required_fields(request_type=bigtable_instance_admin.ListAppProfilesRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_app_profiles._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_app_profiles._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListAppProfilesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.list_app_profiles(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_list_app_profiles_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.list_app_profiles._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("pageSize", "pageToken", )) & set(("parent", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_app_profiles_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb(bigtable_instance_admin.ListAppProfilesRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable_instance_admin.ListAppProfilesResponse.to_json(bigtable_instance_admin.ListAppProfilesResponse())
+
+        request = bigtable_instance_admin.ListAppProfilesRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable_instance_admin.ListAppProfilesResponse()
+
+        client.list_app_profiles(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_list_app_profiles_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.ListAppProfilesRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_app_profiles(request) + + +def test_list_app_profiles_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListAppProfilesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_app_profiles(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/appProfiles" % client.transport._host, args[1]) + + +def test_list_app_profiles_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_app_profiles( + bigtable_instance_admin.ListAppProfilesRequest(), + parent='parent_value', + ) + + +def test_list_app_profiles_rest_pager(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + instance.AppProfile(), + ], + next_page_token='abc', + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[], + next_page_token='def', + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + ], + next_page_token='ghi', + ), + bigtable_instance_admin.ListAppProfilesResponse( + app_profiles=[ + instance.AppProfile(), + instance.AppProfile(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(bigtable_instance_admin.ListAppProfilesResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/instances/sample2'} + + pager = client.list_app_profiles(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, instance.AppProfile) + for i in results) + + pages = list(client.list_app_profiles(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.UpdateAppProfileRequest, + dict, +]) +def test_update_app_profile_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'app_profile': {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'}} + request_init["app_profile"] = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3', 'etag': 'etag_value', 'description': 'description_value', 'multi_cluster_routing_use_any': {'cluster_ids': ['cluster_ids_value1', 'cluster_ids_value2']}, 'single_cluster_routing': {'cluster_id': 'cluster_id_value', 'allow_transactional_writes': True}, 'priority': 1, 'standard_isolation': {'priority': 1}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields["app_profile"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_app_profile(request) + + # Establish that the response is the type that we expect. 
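+    # UpdateAppProfile is a long-running operation: the client wraps the raw
+    # operations_pb2.Operation from the mocked response, so only the echoed
+    # operation name is asserted; the returned future is never resolved here.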
+ assert response.operation.name == "operations/spam" + + +def test_update_app_profile_rest_required_fields(request_type=bigtable_instance_admin.UpdateAppProfileRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_app_profile._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_app_profile._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("ignore_warnings", "update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
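+            # Unlike the GET-based list methods, UpdateAppProfile transcodes to
+            # an HTTP PATCH that carries the app profile in the request body,
+            # so the stub sets 'body' in addition to 'query_params'.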
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "patch",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.update_app_profile(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_update_app_profile_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.update_app_profile._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("ignoreWarnings", "updateMask", )) & set(("appProfile", "updateMask", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_app_profile_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb(bigtable_instance_admin.UpdateAppProfileRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = bigtable_instance_admin.UpdateAppProfileRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.update_app_profile(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_update_app_profile_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.UpdateAppProfileRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'app_profile': {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'}}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_app_profile(request) + + +def test_update_app_profile_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'app_profile': {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'}} + + # get truthy value for each flattened field + mock_args = dict( + app_profile=instance.AppProfile(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" % client.transport._host, args[1]) + + +def test_update_app_profile_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_app_profile( + bigtable_instance_admin.UpdateAppProfileRequest(), + app_profile=instance.AppProfile(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_update_app_profile_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.DeleteAppProfileRequest, + dict, +]) +def test_delete_app_profile_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_app_profile(request) + + # Establish that the response is the type that we expect. 
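+    # DeleteAppProfile returns google.protobuf.Empty, which the generated
+    # client surfaces as None; the mocked response above accordingly carries
+    # an empty body.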
+    assert response is None
+
+
+def test_delete_app_profile_rest_required_fields(request_type=bigtable_instance_admin.DeleteAppProfileRequest):
+    transport_class = transports.BigtableInstanceAdminRestTransport
+
+    request_init = {}
+    request_init["name"] = ""
+    request_init["ignore_warnings"] = False
+    request = request_type(**request_init)
+    pb_request = request_type.pb(request)
+    jsonified_request = json.loads(json_format.MessageToJson(
+        pb_request,
+        including_default_value_fields=False,
+        use_integers_for_enums=False
+    ))
+
+    # verify fields with default values are dropped
+    assert "ignoreWarnings" not in jsonified_request
+
+    unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_app_profile._get_unset_required_fields(jsonified_request)
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with default values are now present
+    assert "ignoreWarnings" in jsonified_request
+    assert jsonified_request["ignoreWarnings"] == request_init["ignore_warnings"]
+
+    jsonified_request["name"] = 'name_value'
+    jsonified_request["ignoreWarnings"] = True
+
+    unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_app_profile._get_unset_required_fields(jsonified_request)
+    # Check that path parameters and body parameters are not mixing in.
+    assert not set(unset_fields) - set(("ignore_warnings", ))
+    jsonified_request.update(unset_fields)
+
+    # verify required fields with non-default values are left alone
+    assert "name" in jsonified_request
+    assert jsonified_request["name"] == 'name_value'
+    assert "ignoreWarnings" in jsonified_request
+    assert jsonified_request["ignoreWarnings"] is True
+
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='rest',
+    )
+    request = request_type(**request_init)
+
+    # Designate an appropriate value for the returned response.
+    return_value = None
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(Session, 'request') as req:
+        # We need to mock transcode() because providing default values
+        # for required fields will fail the real version if the http_options
+        # expect actual values for those fields.
+        with mock.patch.object(path_template, 'transcode') as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
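+            # DeleteAppProfile transcodes to an HTTP DELETE with no body; the
+            # required ignore_warnings flag travels as a query parameter, as
+            # the expected_params assertion below verifies.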
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "delete",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = ''
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.delete_app_profile(request)
+
+            expected_params = [
+                (
+                    "ignoreWarnings",
+                    str(False).lower(),
+                ),
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_delete_app_profile_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.delete_app_profile._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("ignoreWarnings", )) & set(("name", "ignoreWarnings", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_app_profile_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile") as pre:
+        pre.assert_not_called()
+        pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb(bigtable_instance_admin.DeleteAppProfileRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+
+        request = bigtable_instance_admin.DeleteAppProfileRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+
+        client.delete_app_profile(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+
+
+def test_delete_app_profile_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.DeleteAppProfileRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete_app_profile(request)
+
+
+def test_delete_app_profile_rest_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), 'request') as req:
+        # Designate an appropriate value for the returned response.
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_app_profile(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/appProfiles/*}" % client.transport._host, args[1]) + + +def test_delete_app_profile_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_app_profile( + bigtable_instance_admin.DeleteAppProfileRequest(), + name='name_value', + ) + + +def test_delete_app_profile_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + iam_policy_pb2.GetIamPolicyRequest, + dict, +]) +def test_get_iam_policy_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'resource': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b'etag_blob', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
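+    # The IAM methods use plain protobuf messages (iam_policy_pb2 /
+    # policy_pb2) rather than proto-plus types, which is why the mock above
+    # serializes with json_format.MessageToJson and no .pb() conversion.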
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b'etag_blob' + + +def test_get_iam_policy_rest_required_fields(request_type=iam_policy_pb2.GetIamPolicyRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = 'resource_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == 'resource_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.get_iam_policy(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_get_iam_policy_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.get_iam_policy._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("resource", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_iam_policy_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = iam_policy_pb2.GetIamPolicyRequest()
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(policy_pb2.Policy())
+
+        request = iam_policy_pb2.GetIamPolicyRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = policy_pb2.Policy()
+
+        client.get_iam_policy(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.GetIamPolicyRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'resource': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get_iam_policy(request)
+
+
+def test_get_iam_policy_rest_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
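+    # The flattened call passes resource= directly instead of a full
+    # GetIamPolicyRequest; path_template.validate below checks the resulting
+    # URL against the v2 :getIamPolicy HTTP rule.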
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {'resource': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + resource='resource_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{resource=projects/*/instances/*}:getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource='resource_value', + ) + + +def test_get_iam_policy_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + iam_policy_pb2.SetIamPolicyRequest, + dict, +]) +def test_set_iam_policy_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'resource': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b'etag_blob', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b'etag_blob' + + +def test_set_iam_policy_rest_required_fields(request_type=iam_policy_pb2.SetIamPolicyRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = 'resource_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == 'resource_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
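+            # SetIamPolicy posts the request in the HTTP body, so the stubbed
+            # transcode result sets 'body' alongside 'query_params'.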
+            pb_request = request
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.set_iam_policy(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_set_iam_policy_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.set_iam_policy._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("resource", "policy", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_set_iam_policy_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = iam_policy_pb2.SetIamPolicyRequest()
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(policy_pb2.Policy())
+
+        request = iam_policy_pb2.SetIamPolicyRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = policy_pb2.Policy()
+
+        client.set_iam_policy(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.SetIamPolicyRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'resource': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.set_iam_policy(request)
+
+
+def test_set_iam_policy_rest_flattened():
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {'resource': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + resource='resource_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{resource=projects/*/instances/*}:setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource='resource_value', + ) + + +def test_set_iam_policy_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, +]) +def test_test_iam_permissions_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'resource': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
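+    # permissions is a repeated string field, so the round-tripped response
+    # is compared against the full list below.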
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_required_fields(request_type=iam_policy_pb2.TestIamPermissionsRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request_init["permissions"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = 'resource_value' + jsonified_request["permissions"] = 'permissions_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == 'resource_value' + assert "permissions" in jsonified_request + assert jsonified_request["permissions"] == 'permissions_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.test_iam_permissions(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_test_iam_permissions_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.test_iam_permissions._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("resource", "permissions", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_test_iam_permissions_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = iam_policy_pb2.TestIamPermissionsRequest()
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(iam_policy_pb2.TestIamPermissionsResponse())
+
+        request = iam_policy_pb2.TestIamPermissionsRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+        client.test_iam_permissions(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.TestIamPermissionsRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'resource': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'resource': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + resource='resource_value', + permissions=['permissions_value'], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{resource=projects/*/instances/*}:testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource='resource_value', + permissions=['permissions_value'], + ) + + +def test_test_iam_permissions_rest_error(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_instance_admin.ListHotTabletsRequest, + dict, +]) +def test_list_hot_tablets_rest(request_type): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
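+        # ListHotTablets pages like the other list methods; only
+        # next_page_token is populated below because HotTablet items are
+        # exercised separately in the pager test further down.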
+ return_value = bigtable_instance_admin.ListHotTabletsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_hot_tablets(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListHotTabletsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_hot_tablets_rest_required_fields(request_type=bigtable_instance_admin.ListHotTabletsRequest): + transport_class = transports.BigtableInstanceAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_hot_tablets._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_hot_tablets._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("end_time", "page_size", "page_token", "start_time", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.list_hot_tablets(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_list_hot_tablets_rest_unset_required_fields():
+    transport = transports.BigtableInstanceAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.list_hot_tablets._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("endTime", "pageSize", "pageToken", "startTime", )) & set(("parent", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_hot_tablets_rest_interceptors(null_interceptor):
+    transport = transports.BigtableInstanceAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(),
+        )
+    client = BigtableInstanceAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+         mock.patch.object(path_template, "transcode") as transcode, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets") as post, \
+         mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb(bigtable_instance_admin.ListHotTabletsRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable_instance_admin.ListHotTabletsResponse.to_json(bigtable_instance_admin.ListHotTabletsResponse())
+
+        request = bigtable_instance_admin.ListHotTabletsRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable_instance_admin.ListHotTabletsResponse()
+
+        client.list_hot_tablets(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_list_hot_tablets_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.ListHotTabletsRequest):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_hot_tablets(request) + + +def test_list_hot_tablets_rest_flattened(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_instance_admin.ListHotTabletsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_hot_tablets(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" % client.transport._host, args[1]) + + +def test_list_hot_tablets_rest_flattened_error(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_hot_tablets( + bigtable_instance_admin.ListHotTabletsRequest(), + parent='parent_value', + ) + + +def test_list_hot_tablets_rest_pager(transport: str = 'rest'): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+        # Set the response as a series of pages
+        response = (
+            bigtable_instance_admin.ListHotTabletsResponse(
+                hot_tablets=[
+                    instance.HotTablet(),
+                    instance.HotTablet(),
+                    instance.HotTablet(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_instance_admin.ListHotTabletsResponse(
+                hot_tablets=[],
+                next_page_token='def',
+            ),
+            bigtable_instance_admin.ListHotTabletsResponse(
+                hot_tablets=[
+                    instance.HotTablet(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_instance_admin.ListHotTabletsResponse(
+                hot_tablets=[
+                    instance.HotTablet(),
+                    instance.HotTablet(),
+                ],
+            ),
+        )
+        # Two responses for two calls
+        response = response + response
+
+        # Wrap the values into proper Response objs
+        response = tuple(bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response)
+        return_values = tuple(Response() for i in response)
+        for return_val, response_val in zip(return_values, response):
+            return_val._content = response_val.encode('UTF-8')
+            return_val.status_code = 200
+        req.side_effect = return_values
+
+        sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'}
+
+        pager = client.list_hot_tablets(request=sample_request)
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, instance.HotTablet)
+                   for i in results)
+
+        pages = list(client.list_hot_tablets(request=sample_request).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+
+def test_credentials_transport_error():
+    # It is an error to provide credentials and a transport instance.
+    transport = transports.BigtableInstanceAdminGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = BigtableInstanceAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport,
+        )
+
+    # It is an error to provide a credentials file and a transport instance.
+    transport = transports.BigtableInstanceAdminGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = BigtableInstanceAdminClient(
+            client_options={"credentials_file": "credentials.json"},
+            transport=transport,
+        )
+
+    # It is an error to provide an api_key and a transport instance.
+    transport = transports.BigtableInstanceAdminGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    options = client_options.ClientOptions()
+    options.api_key = "api_key"
+    with pytest.raises(ValueError):
+        client = BigtableInstanceAdminClient(
+            client_options=options,
+            transport=transport,
+        )
+
+    # It is an error to provide an api_key and a credential.
+    options = mock.Mock()
+    options.api_key = "api_key"
+    with pytest.raises(ValueError):
+        client = BigtableInstanceAdminClient(
+            client_options=options,
+            credentials=ga_credentials.AnonymousCredentials()
+        )
+
+    # It is an error to provide scopes and a transport instance.
+    transport = transports.BigtableInstanceAdminGrpcTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    with pytest.raises(ValueError):
+        client = BigtableInstanceAdminClient(
+            client_options={"scopes": ["1", "2"]},
+            transport=transport,
+        )
+
+
+def test_transport_instance():
+    # A client may be instantiated with a custom transport instance.
+ transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableInstanceAdminClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableInstanceAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + transports.BigtableInstanceAdminRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "rest", +]) +def test_transport_kind(transport_name): + transport = BigtableInstanceAdminClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.BigtableInstanceAdminGrpcTransport, + ) + +def test_bigtable_instance_admin_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.BigtableInstanceAdminTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_bigtable_instance_admin_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.BigtableInstanceAdminTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
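+    # (The base transport is an abstract interface; the concrete gRPC and REST
+    # transports are expected to override each of these stubs.)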
+ methods = ( + 'create_instance', + 'get_instance', + 'list_instances', + 'update_instance', + 'partial_update_instance', + 'delete_instance', + 'create_cluster', + 'get_cluster', + 'list_clusters', + 'update_cluster', + 'partial_update_cluster', + 'delete_cluster', + 'create_app_profile', + 'get_app_profile', + 'list_app_profiles', + 'update_app_profile', + 'delete_app_profile', + 'get_iam_policy', + 'set_iam_policy', + 'test_iam_permissions', + 'list_hot_tablets', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_bigtable_instance_admin_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableInstanceAdminTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/bigtable.admin.instance', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + quota_project_id="octopus", + ) + + +def test_bigtable_instance_admin_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableInstanceAdminTransport() + adc.assert_called_once() + + +def test_bigtable_instance_admin_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
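+    # (ADC here is Application Default Credentials, resolved via
+    # google.auth.default(); the test patches that resolver instead of reading
+    # any real credential file.)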
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + BigtableInstanceAdminClient() + adc.assert_called_once_with( + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/bigtable.admin.instance', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + quota_project_id=None, + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + ], +) +def test_bigtable_instance_admin_transport_auth_adc(transport_class): + # If credentials and host are not provided, the transport class should use + # ADC credentials. + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class(quota_project_id="octopus", scopes=["1", "2"]) + adc.assert_called_once_with( + scopes=["1", "2"], + default_scopes=( 'https://www.googleapis.com/auth/bigtable.admin', 'https://www.googleapis.com/auth/bigtable.admin.cluster', 'https://www.googleapis.com/auth/bigtable.admin.instance', 'https://www.googleapis.com/auth/cloud-bigtable.admin', 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/cloud-platform.read-only',), + quota_project_id="octopus", + ) + + +@pytest.mark.parametrize( + "transport_class", + [ + transports.BigtableInstanceAdminGrpcTransport, + transports.BigtableInstanceAdminGrpcAsyncIOTransport, + transports.BigtableInstanceAdminRestTransport, + ], +) +def test_bigtable_instance_admin_transport_auth_gdch_credentials(transport_class): + host = 'https://language.com' + api_audience_tests = [None, 'https://language2.com'] + api_audience_expect = [host, 'https://language2.com'] + for t, e in zip(api_audience_tests, api_audience_expect): + with mock.patch.object(google.auth, 'default', autospec=True) as adc: + gdch_mock = mock.MagicMock() + type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) + adc.return_value = (gdch_mock, None) + transport_class(host=host, api_audience=t) + gdch_mock.with_gdch_audience.assert_called_once_with( + e + ) + + +@pytest.mark.parametrize( + "transport_class,grpc_helpers", + [ + (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers), + (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async) + ], +) +def test_bigtable_instance_admin_transport_create_channel(transport_class, grpc_helpers): + # If credentials and host are not provided, the transport class should use + # ADC credentials. 
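+    # (Channel construction is delegated to grpc_helpers.create_channel, so the
+    # assertion below checks the default host, scopes, and gRPC options that it
+    # receives.)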
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.cluster', + 'https://www.googleapis.com/auth/bigtable.admin.instance', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + scopes=["1", "2"], + default_host="bigtableadmin.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.BigtableInstanceAdminGrpcTransport, transports.BigtableInstanceAdminGrpcAsyncIOTransport]) +def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + +def test_bigtable_instance_admin_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.BigtableInstanceAdminRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_bigtable_instance_admin_rest_lro_client(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+    assert transport.operations_client is transport.operations_client
+
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+    "rest",
+])
+def test_bigtable_instance_admin_host_no_port(transport_name):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='bigtableadmin.googleapis.com'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'bigtableadmin.googleapis.com:443'
+        if transport_name in ['grpc', 'grpc_asyncio']
+        else 'https://bigtableadmin.googleapis.com'
+    )
+
+@pytest.mark.parametrize("transport_name", [
+    "grpc",
+    "grpc_asyncio",
+    "rest",
+])
+def test_bigtable_instance_admin_host_with_port(transport_name):
+    client = BigtableInstanceAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        client_options=client_options.ClientOptions(api_endpoint='bigtableadmin.googleapis.com:8000'),
+        transport=transport_name,
+    )
+    assert client.transport._host == (
+        'bigtableadmin.googleapis.com:8000'
+        if transport_name in ['grpc', 'grpc_asyncio']
+        else 'https://bigtableadmin.googleapis.com:8000'
+    )
+
+@pytest.mark.parametrize("transport_name", [
+    "rest",
+])
+def test_bigtable_instance_admin_client_transport_session_collision(transport_name):
+    creds1 = ga_credentials.AnonymousCredentials()
+    creds2 = ga_credentials.AnonymousCredentials()
+    client1 = BigtableInstanceAdminClient(
+        credentials=creds1,
+        transport=transport_name,
+    )
+    client2 = BigtableInstanceAdminClient(
+        credentials=creds2,
+        transport=transport_name,
+    )
+    session1 = client1.transport.create_instance._session
+    session2 = client2.transport.create_instance._session
+    assert session1 != session2
+    session1 = client1.transport.get_instance._session
+    session2 = client2.transport.get_instance._session
+    assert session1 != session2
+    session1 = client1.transport.list_instances._session
+    session2 = client2.transport.list_instances._session
+    assert session1 != session2
+    session1 = client1.transport.update_instance._session
+    session2 = client2.transport.update_instance._session
+    assert session1 != session2
+    session1 = client1.transport.partial_update_instance._session
+    session2 = client2.transport.partial_update_instance._session
+    assert session1 != session2
+    session1 = client1.transport.delete_instance._session
+    session2 = client2.transport.delete_instance._session
+    assert session1 != session2
+    session1 = client1.transport.create_cluster._session
+    session2 = client2.transport.create_cluster._session
+    assert session1 != session2
+    session1 = client1.transport.get_cluster._session
+    session2 = client2.transport.get_cluster._session
+    assert session1 != session2
+    session1 = client1.transport.list_clusters._session
+    session2 = client2.transport.list_clusters._session
+    assert session1 != session2
+    session1 = client1.transport.update_cluster._session
+    session2 = client2.transport.update_cluster._session
+    assert session1 != session2
+    session1 = client1.transport.partial_update_cluster._session
+    session2 = client2.transport.partial_update_cluster._session
+    assert session1 != session2
+    session1 = client1.transport.delete_cluster._session
+    session2 = client2.transport.delete_cluster._session
+    assert session1 != session2
+    session1 = client1.transport.create_app_profile._session
+    session2 = client2.transport.create_app_profile._session
+    assert session1 != session2
+    session1 = client1.transport.get_app_profile._session
+    session2 = client2.transport.get_app_profile._session
+    assert session1 != session2
+    session1 = client1.transport.list_app_profiles._session
+    session2 = client2.transport.list_app_profiles._session
+    assert session1 != session2
+    session1 = client1.transport.update_app_profile._session
+    session2 = client2.transport.update_app_profile._session
+    assert session1 != session2
+    session1 = client1.transport.delete_app_profile._session
+    session2 = client2.transport.delete_app_profile._session
+    assert session1 != session2
+    session1 = client1.transport.get_iam_policy._session
+    session2 = client2.transport.get_iam_policy._session
+    assert session1 != session2
+    session1 = client1.transport.set_iam_policy._session
+    session2 = client2.transport.set_iam_policy._session
+    assert session1 != session2
+    session1 = client1.transport.test_iam_permissions._session
+    session2 = client2.transport.test_iam_permissions._session
+    assert session1 != session2
+    session1 = client1.transport.list_hot_tablets._session
+    session2 = client2.transport.list_hot_tablets._session
+    assert session1 != session2
+
+
+def test_bigtable_instance_admin_grpc_transport_channel():
+    channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.BigtableInstanceAdminGrpcTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+def test_bigtable_instance_admin_grpc_asyncio_transport_channel():
+    channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+    # Check that channel is used if provided.
+    transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport(
+        host="squid.clam.whelk",
+        channel=channel,
+    )
+    assert transport.grpc_channel == channel
+    assert transport._host == "squid.clam.whelk:443"
+    assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
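+# (Although deprecated, api_mtls_endpoint and client_cert_source still take
+# effect; the tests below assert the DeprecationWarning via pytest.warns.)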
+@pytest.mark.parametrize("transport_class", [transports.BigtableInstanceAdminGrpcTransport, transports.BigtableInstanceAdminGrpcAsyncIOTransport]) +def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.BigtableInstanceAdminGrpcTransport, transports.BigtableInstanceAdminGrpcAsyncIOTransport]) +def test_bigtable_instance_admin_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_bigtable_instance_admin_grpc_lro_client(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +def test_bigtable_instance_admin_grpc_lro_async_client(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc_asyncio', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsAsyncClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. + assert transport.operations_client is transport.operations_client + + +def test_app_profile_path(): + project = "squid" + instance = "clam" + app_profile = "whelk" + expected = "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format(project=project, instance=instance, app_profile=app_profile, ) + actual = BigtableInstanceAdminClient.app_profile_path(project, instance, app_profile) + assert expected == actual + + +def test_parse_app_profile_path(): + expected = { + "project": "octopus", + "instance": "oyster", + "app_profile": "nudibranch", + } + path = BigtableInstanceAdminClient.app_profile_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_app_profile_path(path) + assert expected == actual + +def test_cluster_path(): + project = "cuttlefish" + instance = "mussel" + cluster = "winkle" + expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format(project=project, instance=instance, cluster=cluster, ) + actual = BigtableInstanceAdminClient.cluster_path(project, instance, cluster) + assert expected == actual + + +def test_parse_cluster_path(): + expected = { + "project": "nautilus", + "instance": "scallop", + "cluster": "abalone", + } + path = BigtableInstanceAdminClient.cluster_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_cluster_path(path) + assert expected == actual + +def test_crypto_key_path(): + project = "squid" + location = "clam" + key_ring = "whelk" + crypto_key = "octopus" + expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, ) + actual = BigtableInstanceAdminClient.crypto_key_path(project, location, key_ring, crypto_key) + assert expected == actual + + +def test_parse_crypto_key_path(): + expected = { + "project": "oyster", + "location": "nudibranch", + "key_ring": "cuttlefish", + "crypto_key": "mussel", + } + path = BigtableInstanceAdminClient.crypto_key_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_crypto_key_path(path) + assert expected == actual + +def test_hot_tablet_path(): + project = "winkle" + instance = "nautilus" + cluster = "scallop" + hot_tablet = "abalone" + expected = "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format(project=project, instance=instance, cluster=cluster, hot_tablet=hot_tablet, ) + actual = BigtableInstanceAdminClient.hot_tablet_path(project, instance, cluster, hot_tablet) + assert expected == actual + + +def test_parse_hot_tablet_path(): + expected = { + "project": "squid", + "instance": "clam", + "cluster": "whelk", + "hot_tablet": "octopus", + } + path = BigtableInstanceAdminClient.hot_tablet_path(**expected) + + # Check that the path construction is reversible. 
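+    # (The parse_* helpers match a formatted path against its resource template
+    # and return the captured segments as a dict.)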
+ actual = BigtableInstanceAdminClient.parse_hot_tablet_path(path) + assert expected == actual + +def test_instance_path(): + project = "oyster" + instance = "nudibranch" + expected = "projects/{project}/instances/{instance}".format(project=project, instance=instance, ) + actual = BigtableInstanceAdminClient.instance_path(project, instance) + assert expected == actual + + +def test_parse_instance_path(): + expected = { + "project": "cuttlefish", + "instance": "mussel", + } + path = BigtableInstanceAdminClient.instance_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_instance_path(path) + assert expected == actual + +def test_table_path(): + project = "winkle" + instance = "nautilus" + table = "scallop" + expected = "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) + actual = BigtableInstanceAdminClient.table_path(project, instance, table) + assert expected == actual + + +def test_parse_table_path(): + expected = { + "project": "abalone", + "instance": "squid", + "table": "clam", + } + path = BigtableInstanceAdminClient.table_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_table_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "whelk" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = BigtableInstanceAdminClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "octopus", + } + path = BigtableInstanceAdminClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "oyster" + expected = "folders/{folder}".format(folder=folder, ) + actual = BigtableInstanceAdminClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nudibranch", + } + path = BigtableInstanceAdminClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "cuttlefish" + expected = "organizations/{organization}".format(organization=organization, ) + actual = BigtableInstanceAdminClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "mussel", + } + path = BigtableInstanceAdminClient.common_organization_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_organization_path(path) + assert expected == actual + +def test_common_project_path(): + project = "winkle" + expected = "projects/{project}".format(project=project, ) + actual = BigtableInstanceAdminClient.common_project_path(project) + assert expected == actual + + +def test_parse_common_project_path(): + expected = { + "project": "nautilus", + } + path = BigtableInstanceAdminClient.common_project_path(**expected) + + # Check that the path construction is reversible. 
+ actual = BigtableInstanceAdminClient.parse_common_project_path(path) + assert expected == actual + +def test_common_location_path(): + project = "scallop" + location = "abalone" + expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) + actual = BigtableInstanceAdminClient.common_location_path(project, location) + assert expected == actual + + +def test_parse_common_location_path(): + expected = { + "project": "squid", + "location": "clam", + } + path = BigtableInstanceAdminClient.common_location_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableInstanceAdminClient.parse_common_location_path(path) + assert expected == actual + + +def test_client_with_default_client_info(): + client_info = gapic_v1.client_info.ClientInfo() + + with mock.patch.object(transports.BigtableInstanceAdminTransport, '_prep_wrapped_messages') as prep: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + + with mock.patch.object(transports.BigtableInstanceAdminTransport, '_prep_wrapped_messages') as prep: + transport_class = BigtableInstanceAdminClient.get_transport_class() + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials(), + client_info=client_info, + ) + prep.assert_called_once_with(client_info) + +@pytest.mark.asyncio +async def test_transport_close_async(): + client = BigtableInstanceAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc_asyncio", + ) + with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: + async with client: + close.assert_not_called() + close.assert_called_once() + + +def test_transport_close(): + transports = { + "rest": "_session", + "grpc": "_grpc_channel", + } + + for transport, close_name in transports.items(): + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: + with client: + close.assert_not_called() + close.assert_called_once() + +def test_client_ctx(): + transports = [ + 'rest', + 'grpc', + ] + for transport in transports: + client = BigtableInstanceAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport + ) + # Test client calls underlying transport. 
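+        # (Exiting the client's context manager should close the transport
+        # exactly once; close must not fire while the block is still open.)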
+ with mock.patch.object(type(client.transport), "close") as close: + close.assert_not_called() + with client: + pass + close.assert_called() + +@pytest.mark.parametrize("client_class,transport_class", [ + (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport), + (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport), +]) +def test_api_key_credentials(client_class, transport_class): + with mock.patch.object( + google.auth._default, "get_api_key_credentials", create=True + ) as get_api_key_credentials: + mock_cred = mock.Mock() + get_api_key_credentials.return_value = mock_cred + options = client_options.ClientOptions() + options.api_key = "api_key" + with mock.patch.object(transport_class, "__init__") as patched: + patched.return_value = None + client = client_class(client_options=options) + patched.assert_called_once_with( + credentials=mock_cred, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py new file mode 100644 index 000000000..2fff0f14d --- /dev/null +++ b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -0,0 +1,14140 @@ +# -*- coding: utf-8 -*- +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+import os
+# try/except added for compatibility with python < 3.8
+try:
+    from unittest import mock
+    from unittest.mock import AsyncMock  # pragma: NO COVER
+except ImportError:  # pragma: NO COVER
+    import mock
+
+import grpc
+from grpc.experimental import aio
+from collections.abc import Iterable
+from google.protobuf import json_format
+import json
+import math
+import pytest
+from proto.marshal.rules.dates import DurationRule, TimestampRule
+from proto.marshal.rules import wrappers
+from requests import Response
+from requests import Request, PreparedRequest
+from requests.sessions import Session
+
+from google.api_core import client_options
+from google.api_core import exceptions as core_exceptions
+from google.api_core import future
+from google.api_core import gapic_v1
+from google.api_core import grpc_helpers
+from google.api_core import grpc_helpers_async
+from google.api_core import operation
+from google.api_core import operation_async  # type: ignore
+from google.api_core import operations_v1
+from google.api_core import path_template
+from google.auth import credentials as ga_credentials
+from google.auth.exceptions import MutualTLSChannelError
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import BigtableTableAdminAsyncClient
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import BigtableTableAdminClient
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers
+from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports
+from google.cloud.bigtable_admin_v2.types import bigtable_table_admin
+from google.cloud.bigtable_admin_v2.types import table
+from google.cloud.bigtable_admin_v2.types import table as gba_table
+from google.iam.v1 import iam_policy_pb2  # type: ignore
+from google.iam.v1 import options_pb2  # type: ignore
+from google.iam.v1 import policy_pb2  # type: ignore
+from google.longrunning import operations_pb2  # type: ignore
+from google.oauth2 import service_account
+from google.protobuf import any_pb2  # type: ignore
+from google.protobuf import duration_pb2  # type: ignore
+from google.protobuf import field_mask_pb2  # type: ignore
+from google.protobuf import timestamp_pb2  # type: ignore
+from google.rpc import status_pb2  # type: ignore
+from google.type import expr_pb2  # type: ignore
+import google.auth
+
+
+def client_cert_source_callback():
+    return b"cert bytes", b"key bytes"
+
+
+# If default endpoint is localhost, then default mtls endpoint will be the same.
+# This method modifies the default endpoint so the client can produce a different
+# mtls endpoint for endpoint testing purposes.
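+# (For example, a DEFAULT_ENDPOINT of "localhost:7469" is rewritten to
+# "foo.googleapis.com" so a distinct mtls endpoint can be derived; non-localhost
+# endpoints pass through unchanged.)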
+def modify_default_endpoint(client): + return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT + + +def test__get_default_mtls_endpoint(): + api_endpoint = "example.googleapis.com" + api_mtls_endpoint = "example.mtls.googleapis.com" + sandbox_endpoint = "example.sandbox.googleapis.com" + sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" + non_googleapi = "api.example.com" + + assert BigtableTableAdminClient._get_default_mtls_endpoint(None) is None + assert BigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint + assert BigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint + assert BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint + assert BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint + assert BigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi + + +@pytest.mark.parametrize("client_class,transport_name", [ + (BigtableTableAdminClient, "grpc"), + (BigtableTableAdminAsyncClient, "grpc_asyncio"), + (BigtableTableAdminClient, "rest"), +]) +def test_bigtable_table_admin_client_from_service_account_info(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: + factory.return_value = creds + info = {"valid": True} + client = client_class.from_service_account_info(info, transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'bigtableadmin.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://bigtableadmin.googleapis.com' + ) + + +@pytest.mark.parametrize("transport_class,transport_name", [ + (transports.BigtableTableAdminGrpcTransport, "grpc"), + (transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), + (transports.BigtableTableAdminRestTransport, "rest"), +]) +def test_bigtable_table_admin_client_service_account_always_use_jwt(transport_class, transport_name): + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=True) + use_jwt.assert_called_once_with(True) + + with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: + creds = service_account.Credentials(None, None, None) + transport = transport_class(credentials=creds, always_use_jwt_access=False) + use_jwt.assert_not_called() + + +@pytest.mark.parametrize("client_class,transport_name", [ + (BigtableTableAdminClient, "grpc"), + (BigtableTableAdminAsyncClient, "grpc_asyncio"), + (BigtableTableAdminClient, "rest"), +]) +def test_bigtable_table_admin_client_from_service_account_file(client_class, transport_name): + creds = ga_credentials.AnonymousCredentials() + with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: + factory.return_value = creds + client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) + assert client.transport._credentials == creds + assert isinstance(client, client_class) + + client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) + assert 
client.transport._credentials == creds + assert isinstance(client, client_class) + + assert client.transport._host == ( + 'bigtableadmin.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else + 'https://bigtableadmin.googleapis.com' + ) + + +def test_bigtable_table_admin_client_get_transport_class(): + transport = BigtableTableAdminClient.get_transport_class() + available_transports = [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminRestTransport, + ] + assert transport in available_transports + + transport = BigtableTableAdminClient.get_transport_class("grpc") + assert transport == transports.BigtableTableAdminGrpcTransport + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), + (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), +]) +@mock.patch.object(BigtableTableAdminClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableTableAdminClient)) +@mock.patch.object(BigtableTableAdminAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableTableAdminAsyncClient)) +def test_bigtable_table_admin_client_client_options(client_class, transport_class, transport_name): + # Check that if channel is provided we won't create a new one. + with mock.patch.object(BigtableTableAdminClient, 'get_transport_class') as gtc: + transport = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ) + client = client_class(transport=transport) + gtc.assert_not_called() + + # Check that if channel is provided via str we will create a new one. + with mock.patch.object(BigtableTableAdminClient, 'get_transport_class') as gtc: + client = client_class(transport=transport_name) + gtc.assert_called() + + # Check the case api_endpoint is provided. + options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name, client_options=options) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is + # "always". 
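+    # (Under "always" the client must target DEFAULT_MTLS_ENDPOINT even though
+    # no client certificate has been configured.)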
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_MTLS_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError): + client = client_class(transport=transport_name) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): + with pytest.raises(ValueError): + client = client_class(transport=transport_name) + + # Check the case quota_project_id is provided + options = client_options.ClientOptions(quota_project_id="octopus") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id="octopus", + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + # Check the case api_endpoint is provided + options = client_options.ClientOptions(api_audience="https://language.googleapis.com") + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience="https://language.googleapis.com" + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", "true"), + (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", "true"), + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", "false"), + (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", "false"), + (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", "true"), + (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", "false"), +]) +@mock.patch.object(BigtableTableAdminClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableTableAdminClient)) +@mock.patch.object(BigtableTableAdminAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableTableAdminAsyncClient)) +@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) +def test_bigtable_table_admin_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): + # This tests the endpoint autoswitch behavior. 
Endpoint is autoswitched to the default + # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. + + # Check the case client_cert_source is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + + if use_client_cert_env == "false": + expected_client_cert_source = None + expected_host = client.DEFAULT_ENDPOINT + else: + expected_client_cert_source = client_cert_source_callback + expected_host = client.DEFAULT_MTLS_ENDPOINT + + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case ADC client cert is provided. Whether client cert is used depends on + # GOOGLE_API_USE_CLIENT_CERTIFICATE value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): + if use_client_cert_env == "false": + expected_host = client.DEFAULT_ENDPOINT + expected_client_cert_source = None + else: + expected_host = client.DEFAULT_MTLS_ENDPOINT + expected_client_cert_source = client_cert_source_callback + + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=expected_host, + scopes=None, + client_cert_source_for_mtls=expected_client_cert_source, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # Check the case client_cert_source and ADC client cert are not provided. 
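+    # (With no cert source available at all, the plain DEFAULT_ENDPOINT is used
+    # whatever GOOGLE_API_USE_CLIENT_CERTIFICATE is set to.)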
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): + with mock.patch.object(transport_class, '__init__') as patched: + with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): + patched.return_value = None + client = client_class(transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class", [ + BigtableTableAdminClient, BigtableTableAdminAsyncClient +]) +@mock.patch.object(BigtableTableAdminClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableTableAdminClient)) +@mock.patch.object(BigtableTableAdminAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableTableAdminAsyncClient)) +def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_class): + mock_client_cert_source = mock.Mock() + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source == mock_client_cert_source + + # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + mock_client_cert_source = mock.Mock() + mock_api_endpoint = "foo" + options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) + assert api_endpoint == mock_api_endpoint + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_ENDPOINT + assert cert_source is None + + # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
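+    # (With a default client cert available and certificate usage enabled, the
+    # resolver should pick the mTLS endpoint and return that cert source.)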
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): + with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): + api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() + assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + assert cert_source == mock_client_cert_source + + +@pytest.mark.parametrize("client_class,transport_class,transport_name", [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), + (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), +]) +def test_bigtable_table_admin_client_client_options_scopes(client_class, transport_class, transport_name): + # Check the case scopes are provided. + options = client_options.ClientOptions( + scopes=["1", "2"], + ) + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file=None, + host=client.DEFAULT_ENDPOINT, + scopes=["1", "2"], + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", grpc_helpers), + (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), + (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", None), +]) +def test_bigtable_table_admin_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. 
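+    # (The file path is forwarded to the transport untouched as credentials_file;
+    # the transport, not the client, is responsible for loading it.)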
+ options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + +def test_bigtable_table_admin_client_client_options_from_dict(): + with mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminGrpcTransport.__init__') as grpc_transport: + grpc_transport.return_value = None + client = BigtableTableAdminClient( + client_options={'api_endpoint': 'squid.clam.whelk'} + ) + grpc_transport.assert_called_once_with( + credentials=None, + credentials_file=None, + host="squid.clam.whelk", + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + +@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", grpc_helpers), + (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), +]) +def test_bigtable_table_admin_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): + # Check the case credentials file is provided. + options = client_options.ClientOptions( + credentials_file="credentials.json" + ) + + with mock.patch.object(transport_class, '__init__') as patched: + patched.return_value = None + client = client_class(client_options=options, transport=transport_name) + patched.assert_called_once_with( + credentials=None, + credentials_file="credentials.json", + host=client.DEFAULT_ENDPOINT, + scopes=None, + client_cert_source_for_mtls=None, + quota_project_id=None, + client_info=transports.base.DEFAULT_CLIENT_INFO, + always_use_jwt_access=True, + api_audience=None, + ) + + # test that the credentials from file are saved and used as the credentials. 
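+    # (create_channel should receive the credentials loaded from the file, not
+    # the ones returned by google.auth.default.)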
+    with mock.patch.object(
+        google.auth, "load_credentials_from_file", autospec=True
+    ) as load_creds, mock.patch.object(
+        google.auth, "default", autospec=True
+    ) as adc, mock.patch.object(
+        grpc_helpers, "create_channel"
+    ) as create_channel:
+        creds = ga_credentials.AnonymousCredentials()
+        file_creds = ga_credentials.AnonymousCredentials()
+        load_creds.return_value = (file_creds, None)
+        adc.return_value = (creds, None)
+        client = client_class(client_options=options, transport=transport_name)
+        create_channel.assert_called_with(
+            "bigtableadmin.googleapis.com:443",
+            credentials=file_creds,
+            credentials_file=None,
+            quota_project_id=None,
+            default_scopes=(
+                'https://www.googleapis.com/auth/bigtable.admin',
+                'https://www.googleapis.com/auth/bigtable.admin.table',
+                'https://www.googleapis.com/auth/cloud-bigtable.admin',
+                'https://www.googleapis.com/auth/cloud-bigtable.admin.table',
+                'https://www.googleapis.com/auth/cloud-platform',
+                'https://www.googleapis.com/auth/cloud-platform.read-only',
+            ),
+            scopes=None,
+            default_host="bigtableadmin.googleapis.com",
+            ssl_credentials=None,
+            options=[
+                ("grpc.max_send_message_length", -1),
+                ("grpc.max_receive_message_length", -1),
+            ],
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+    bigtable_table_admin.CreateTableRequest,
+    dict,
+])
+def test_create_table(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gba_table.Table(
+            name='name_value',
+            granularity=gba_table.Table.TimestampGranularity.MILLIS,
+            deletion_protection=True,
+        )
+        response = client.create_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.CreateTableRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gba_table.Table)
+    assert response.name == 'name_value'
+    assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS
+    assert response.deletion_protection is True
+
+
+def test_create_table_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_table),
+            '__call__') as call:
+        client.create_table()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.CreateTableRequest()
+
+@pytest.mark.asyncio
+async def test_create_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CreateTableRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table(
+            name='name_value',
+            granularity=gba_table.Table.TimestampGranularity.MILLIS,
+            deletion_protection=True,
+        ))
+        response = await client.create_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.CreateTableRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, gba_table.Table)
+    assert response.name == 'name_value'
+    assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS
+    assert response.deletion_protection is True
+
+
+@pytest.mark.asyncio
+async def test_create_table_async_from_dict():
+    await test_create_table_async(request_type=dict)
+
+
+def test_create_table_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.CreateTableRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_table),
+            '__call__') as call:
+        call.return_value = gba_table.Table()
+        client.create_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_table_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.CreateTableRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_table),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table())
+        await client.create_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
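+    # The routing header travels in the call's keyword arguments, e.g.
+    #     metadata=[..., ('x-goog-request-params', 'parent=parent_value')]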
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_create_table_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = gba_table.Table()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_table(
+            parent='parent_value',
+            table_id='table_id_value',
+            table=gba_table.Table(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].table_id
+        mock_val = 'table_id_value'
+        assert arg == mock_val
+        arg = args[0].table
+        mock_val = gba_table.Table(name='name_value')
+        assert arg == mock_val
+
+
+def test_create_table_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_table(
+            bigtable_table_admin.CreateTableRequest(),
+            parent='parent_value',
+            table_id='table_id_value',
+            table=gba_table.Table(name='name_value'),
+        )
+
+@pytest.mark.asyncio
+async def test_create_table_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_table(
+            parent='parent_value',
+            table_id='table_id_value',
+            table=gba_table.Table(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].table_id
+        mock_val = 'table_id_value'
+        assert arg == mock_val
+        arg = args[0].table
+        mock_val = gba_table.Table(name='name_value')
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_create_table_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.create_table( + bigtable_table_admin.CreateTableRequest(), + parent='parent_value', + table_id='table_id_value', + table=gba_table.Table(name='name_value'), + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.CreateTableFromSnapshotRequest, + dict, +]) +def test_create_table_from_snapshot(request_type, transport: str = 'grpc'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_create_table_from_snapshot_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), + '__call__') as call: + client.create_table_from_snapshot() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CreateTableFromSnapshotRequest): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() + + # Establish that the response is the type that we expect. 
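+    # Long-running methods return an operation future wrapping the mocked
+    # operations_pb2.Operation above, not the finished resource itself.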
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_async_from_dict(): + await test_create_table_from_snapshot_async(request_type=dict) + + +def test_create_table_from_snapshot_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_create_table_from_snapshot_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CreateTableFromSnapshotRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.create_table_from_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +def test_create_table_from_snapshot_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.create_table_from_snapshot), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.create_table_from_snapshot( + parent='parent_value', + table_id='table_id_value', + source_snapshot='source_snapshot_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].table_id
+        mock_val = 'table_id_value'
+        assert arg == mock_val
+        arg = args[0].source_snapshot
+        mock_val = 'source_snapshot_value'
+        assert arg == mock_val
+
+
+def test_create_table_from_snapshot_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_table_from_snapshot(
+            bigtable_table_admin.CreateTableFromSnapshotRequest(),
+            parent='parent_value',
+            table_id='table_id_value',
+            source_snapshot='source_snapshot_value',
+        )
+
+@pytest.mark.asyncio
+async def test_create_table_from_snapshot_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_table_from_snapshot),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_table_from_snapshot(
+            parent='parent_value',
+            table_id='table_id_value',
+            source_snapshot='source_snapshot_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].table_id
+        mock_val = 'table_id_value'
+        assert arg == mock_val
+        arg = args[0].source_snapshot
+        mock_val = 'source_snapshot_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_create_table_from_snapshot_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_table_from_snapshot(
+            bigtable_table_admin.CreateTableFromSnapshotRequest(),
+            parent='parent_value',
+            table_id='table_id_value',
+            source_snapshot='source_snapshot_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+    bigtable_table_admin.ListTablesRequest,
+    dict,
+])
+def test_list_tables(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.ListTablesResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_tables(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.ListTablesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListTablesPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_tables_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__') as call:
+        client.list_tables()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.ListTablesRequest()
+
+@pytest.mark.asyncio
+async def test_list_tables_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.ListTablesRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListTablesResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_tables(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.ListTablesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListTablesAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_tables_async_from_dict():
+    await test_list_tables_async(request_type=dict)
+
+
+def test_list_tables_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.ListTablesRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__') as call:
+        call.return_value = bigtable_table_admin.ListTablesResponse()
+        client.list_tables(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_tables_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.ListTablesRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListTablesResponse())
+        await client.list_tables(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_list_tables_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.ListTablesResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_tables(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+
+def test_list_tables_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_tables(
+            bigtable_table_admin.ListTablesRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_tables_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListTablesResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_tables(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_tables_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_tables(
+            bigtable_table_admin.ListTablesRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_tables_pager(transport_name: str = "grpc"):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                    table.Table(),
+                    table.Table(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                    table.Table(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_tables(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, table.Table)
+                   for i in results)
+
+
+def test_list_tables_pages(transport_name: str = "grpc"):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                    table.Table(),
+                    table.Table(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                    table.Table(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_tables(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_tables_async_pager():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                    table.Table(),
+                    table.Table(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListTablesResponse(
+                tables=[
+                    table.Table(),
+                    table.Table(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_tables(request={})
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, table.Table)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_tables_async_pages():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_tables),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
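+        # Each element of side_effect is consumed by one RPC; the trailing
+        # RuntimeError is a sentinel that fails the test if the pager requests
+        # a page beyond the final (token-less) response.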
+ call.side_effect = ( + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + table.Table(), + ], + next_page_token='abc', + ), + bigtable_table_admin.ListTablesResponse( + tables=[], + next_page_token='def', + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + ], + next_page_token='ghi', + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + ], + ), + RuntimeError, + ) + pages = [] + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch + await client.list_tables(request={}) + ).pages: + pages.append(page_) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.GetTableRequest, + dict, +]) +def test_get_table(request_type, transport: str = 'grpc'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_table), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = table.Table( + name='name_value', + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + response = client.get_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Table) + assert response.name == 'name_value' + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +def test_get_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_table), + '__call__') as call: + client.get_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GetTableRequest() + +@pytest.mark.asyncio +async def test_get_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.GetTableRequest): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_table), + '__call__') as call: + # Designate an appropriate return value for the call. 
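+        # FakeUnaryUnaryCall makes the mocked stub awaitable, mimicking a
+        # real grpc.aio unary-unary call object for the async client.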
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table(
+            name='name_value',
+            granularity=table.Table.TimestampGranularity.MILLIS,
+            deletion_protection=True,
+        ))
+        response = await client.get_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.GetTableRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, table.Table)
+    assert response.name == 'name_value'
+    assert response.granularity == table.Table.TimestampGranularity.MILLIS
+    assert response.deletion_protection is True
+
+
+@pytest.mark.asyncio
+async def test_get_table_async_from_dict():
+    await test_get_table_async(request_type=dict)
+
+
+def test_get_table_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.GetTableRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_table),
+            '__call__') as call:
+        call.return_value = table.Table()
+        client.get_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_table_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.GetTableRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_table),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table())
+        await client.get_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_get_table_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Table()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_table(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_get_table_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_table(
+            bigtable_table_admin.GetTableRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_table_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_table(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_table_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_table(
+            bigtable_table_admin.GetTableRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+    bigtable_table_admin.UpdateTableRequest,
+    dict,
+])
+def test_update_table(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.update_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.UpdateTableRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_update_table_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.update_table), + '__call__') as call: + client.update_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateTableRequest() + +@pytest.mark.asyncio +async def test_update_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.UpdateTableRequest): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UpdateTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_update_table_async_from_dict(): + await test_update_table_async(request_type=dict) + + +def test_update_table_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UpdateTableRequest() + + request.table.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'table.name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_update_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UpdateTableRequest() + + request.table.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.update_table), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.update_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'table.name=name_value',
+    ) in kw['metadata']
+
+
+def test_update_table_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_table(
+            table=gba_table.Table(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].table
+        mock_val = gba_table.Table(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+
+def test_update_table_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_table(
+            bigtable_table_admin.UpdateTableRequest(),
+            table=gba_table.Table(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+@pytest.mark.asyncio
+async def test_update_table_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_table(
+            table=gba_table.Table(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].table
+        mock_val = gba_table.Table(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_update_table_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.update_table( + bigtable_table_admin.UpdateTableRequest(), + table=gba_table.Table(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.DeleteTableRequest, + dict, +]) +def test_delete_table(request_type, transport: str = 'grpc'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_table), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteTableRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_delete_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_table), + '__call__') as call: + client.delete_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteTableRequest() + +@pytest.mark.asyncio +async def test_delete_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.DeleteTableRequest): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.delete_table), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.delete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DeleteTableRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_delete_table_async_from_dict(): + await test_delete_table_async(request_type=dict) + + +def test_delete_table_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DeleteTableRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.delete_table),
+            '__call__') as call:
+        call.return_value = None
+        client.delete_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_table_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.DeleteTableRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_table),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.delete_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_delete_table_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_table(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_delete_table_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_table(
+            bigtable_table_admin.DeleteTableRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_delete_table_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_table(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+ assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + +@pytest.mark.asyncio +async def test_delete_table_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.delete_table( + bigtable_table_admin.DeleteTableRequest(), + name='name_value', + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.UndeleteTableRequest, + dict, +]) +def test_undelete_table(request_type, transport: str = 'grpc'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undelete_table), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/spam') + response = client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UndeleteTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_undelete_table_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undelete_table), + '__call__') as call: + client.undelete_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UndeleteTableRequest() + +@pytest.mark.asyncio +async def test_undelete_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.UndeleteTableRequest): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undelete_table), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.UndeleteTableRequest() + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_undelete_table_async_from_dict(): + await test_undelete_table_async(request_type=dict) + + +def test_undelete_table_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UndeleteTableRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undelete_table), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_undelete_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.UndeleteTableRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undelete_table), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.undelete_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_undelete_table_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.undelete_table), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name='operations/op') + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.undelete_table( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_undelete_table_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.undelete_table(
+            bigtable_table_admin.UndeleteTableRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_undelete_table_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.undelete_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.undelete_table(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_undelete_table_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.undelete_table(
+            bigtable_table_admin.UndeleteTableRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.ModifyColumnFamiliesRequest,
+  dict,
+])
+def test_modify_column_families(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.modify_column_families),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Table(
+            name='name_value',
+            granularity=table.Table.TimestampGranularity.MILLIS,
+            deletion_protection=True,
+        )
+        response = client.modify_column_families(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, table.Table)
+    assert response.name == 'name_value'
+    assert response.granularity == table.Table.TimestampGranularity.MILLIS
+    assert response.deletion_protection is True
+
+
+def test_modify_column_families_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
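+    # (Patching __call__ on the transport method's type intercepts only the
+    # wire call; request construction in the client layer still runs.)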
+    with mock.patch.object(
+            type(client.transport.modify_column_families),
+            '__call__') as call:
+        client.modify_column_families()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest()
+
+@pytest.mark.asyncio
+async def test_modify_column_families_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.ModifyColumnFamiliesRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.modify_column_families),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table(
+            name='name_value',
+            granularity=table.Table.TimestampGranularity.MILLIS,
+            deletion_protection=True,
+        ))
+        response = await client.modify_column_families(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, table.Table)
+    assert response.name == 'name_value'
+    assert response.granularity == table.Table.TimestampGranularity.MILLIS
+    assert response.deletion_protection is True
+
+
+@pytest.mark.asyncio
+async def test_modify_column_families_async_from_dict():
+    await test_modify_column_families_async(request_type=dict)
+
+
+def test_modify_column_families_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.ModifyColumnFamiliesRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.modify_column_families),
+            '__call__') as call:
+        call.return_value = table.Table()
+        client.modify_column_families(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_modify_column_families_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.ModifyColumnFamiliesRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.modify_column_families),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table())
+        await client.modify_column_families(request)
+
+    # Establish that the underlying gRPC stub method was called.
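+    # (Async variants assert only that the stub was invoked; the
+    # FakeUnaryUnaryCall above supplies the awaitable the client expects.)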
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_modify_column_families_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.modify_column_families),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Table()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.modify_column_families(
+            name='name_value',
+            modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+        arg = args[0].modifications
+        mock_val = [bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')]
+        assert arg == mock_val
+
+
+def test_modify_column_families_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.modify_column_families(
+            bigtable_table_admin.ModifyColumnFamiliesRequest(),
+            name='name_value',
+            modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')],
+        )
+
+@pytest.mark.asyncio
+async def test_modify_column_families_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.modify_column_families),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.modify_column_families(
+            name='name_value',
+            modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+        arg = args[0].modifications
+        mock_val = [bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')]
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_modify_column_families_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+ with pytest.raises(ValueError): + await client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name='name_value', + modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')], + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.DropRowRangeRequest, + dict, +]) +def test_drop_row_range(request_type, transport: str = 'grpc'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.drop_row_range), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = None + response = client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DropRowRangeRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +def test_drop_row_range_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.drop_row_range), + '__call__') as call: + client.drop_row_range() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DropRowRangeRequest() + +@pytest.mark.asyncio +async def test_drop_row_range_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.DropRowRangeRequest): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.drop_row_range), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + response = await client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.DropRowRangeRequest() + + # Establish that the response is the type that we expect. + assert response is None + + +@pytest.mark.asyncio +async def test_drop_row_range_async_from_dict(): + await test_drop_row_range_async(request_type=dict) + + +def test_drop_row_range_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DropRowRangeRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object( + type(client.transport.drop_row_range), + '__call__') as call: + call.return_value = None + client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_drop_row_range_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.DropRowRangeRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.drop_row_range), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) + await client.drop_row_range(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.GenerateConsistencyTokenRequest, + dict, +]) +def test_generate_consistency_token(request_type, transport: str = 'grpc'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.generate_consistency_token), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token='consistency_token_value', + ) + response = client.generate_consistency_token(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == 'consistency_token_value' + + +def test_generate_consistency_token_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+    with mock.patch.object(
+            type(client.transport.generate_consistency_token),
+            '__call__') as call:
+        client.generate_consistency_token()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest()
+
+@pytest.mark.asyncio
+async def test_generate_consistency_token_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.GenerateConsistencyTokenRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.generate_consistency_token),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.GenerateConsistencyTokenResponse(
+            consistency_token='consistency_token_value',
+        ))
+        response = await client.generate_consistency_token(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse)
+    assert response.consistency_token == 'consistency_token_value'
+
+
+@pytest.mark.asyncio
+async def test_generate_consistency_token_async_from_dict():
+    await test_generate_consistency_token_async(request_type=dict)
+
+
+def test_generate_consistency_token_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.GenerateConsistencyTokenRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.generate_consistency_token),
+            '__call__') as call:
+        call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
+        client.generate_consistency_token(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_generate_consistency_token_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.GenerateConsistencyTokenRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.generate_consistency_token),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.GenerateConsistencyTokenResponse())
+        await client.generate_consistency_token(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_generate_consistency_token_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.generate_consistency_token),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.generate_consistency_token(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_generate_consistency_token_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.generate_consistency_token(
+            bigtable_table_admin.GenerateConsistencyTokenRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_generate_consistency_token_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.generate_consistency_token),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.GenerateConsistencyTokenResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.generate_consistency_token(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_generate_consistency_token_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.generate_consistency_token(
+            bigtable_table_admin.GenerateConsistencyTokenRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.CheckConsistencyRequest,
+  dict,
+])
+def test_check_consistency(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.check_consistency),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.CheckConsistencyResponse(
+            consistent=True,
+        )
+        response = client.check_consistency(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.CheckConsistencyRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse)
+    assert response.consistent is True
+
+
+def test_check_consistency_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.check_consistency),
+            '__call__') as call:
+        client.check_consistency()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.CheckConsistencyRequest()
+
+@pytest.mark.asyncio
+async def test_check_consistency_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CheckConsistencyRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.check_consistency),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.CheckConsistencyResponse(
+            consistent=True,
+        ))
+        response = await client.check_consistency(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.CheckConsistencyRequest()
+
+    # Establish that the response is the type that we expect.
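+    # (The awaited result should be the unwrapped CheckConsistencyResponse
+    # message, not the FakeUnaryUnaryCall wrapper.)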
+ assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True + + +@pytest.mark.asyncio +async def test_check_consistency_async_from_dict(): + await test_check_consistency_async(request_type=dict) + + +def test_check_consistency_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CheckConsistencyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), + '__call__') as call: + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_check_consistency_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CheckConsistencyRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.CheckConsistencyResponse()) + await client.check_consistency(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_check_consistency_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.check_consistency), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = bigtable_table_admin.CheckConsistencyResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.check_consistency( + name='name_value', + consistency_token='consistency_token_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + arg = args[0].consistency_token + mock_val = 'consistency_token_value' + assert arg == mock_val + + +def test_check_consistency_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.check_consistency(
+            bigtable_table_admin.CheckConsistencyRequest(),
+            name='name_value',
+            consistency_token='consistency_token_value',
+        )
+
+@pytest.mark.asyncio
+async def test_check_consistency_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.check_consistency),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.CheckConsistencyResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.check_consistency(
+            name='name_value',
+            consistency_token='consistency_token_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+        arg = args[0].consistency_token
+        mock_val = 'consistency_token_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_check_consistency_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.check_consistency(
+            bigtable_table_admin.CheckConsistencyRequest(),
+            name='name_value',
+            consistency_token='consistency_token_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.SnapshotTableRequest,
+  dict,
+])
+def test_snapshot_table(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.snapshot_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.snapshot_table(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.SnapshotTableRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_snapshot_table_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+ with mock.patch.object( + type(client.transport.snapshot_table), + '__call__') as call: + client.snapshot_table() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.SnapshotTableRequest() + +@pytest.mark.asyncio +async def test_snapshot_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.SnapshotTableRequest): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.snapshot_table), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.SnapshotTableRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_snapshot_table_async_from_dict(): + await test_snapshot_table_async(request_type=dict) + + +def test_snapshot_table_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.SnapshotTableRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.snapshot_table), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_snapshot_table_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.SnapshotTableRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.snapshot_table), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.snapshot_table(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
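+    # (Routing information travels in the 'x-goog-request-params' gRPC
+    # metadata entry, inspected via the call's keyword arguments below.)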
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_snapshot_table_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.snapshot_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.snapshot_table(
+            name='name_value',
+            cluster='cluster_value',
+            snapshot_id='snapshot_id_value',
+            description='description_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+        arg = args[0].cluster
+        mock_val = 'cluster_value'
+        assert arg == mock_val
+        arg = args[0].snapshot_id
+        mock_val = 'snapshot_id_value'
+        assert arg == mock_val
+        arg = args[0].description
+        mock_val = 'description_value'
+        assert arg == mock_val
+
+
+def test_snapshot_table_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.snapshot_table(
+            bigtable_table_admin.SnapshotTableRequest(),
+            name='name_value',
+            cluster='cluster_value',
+            snapshot_id='snapshot_id_value',
+            description='description_value',
+        )
+
+@pytest.mark.asyncio
+async def test_snapshot_table_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.snapshot_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.snapshot_table(
+            name='name_value',
+            cluster='cluster_value',
+            snapshot_id='snapshot_id_value',
+            description='description_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+        arg = args[0].cluster
+        mock_val = 'cluster_value'
+        assert arg == mock_val
+        arg = args[0].snapshot_id
+        mock_val = 'snapshot_id_value'
+        assert arg == mock_val
+        arg = args[0].description
+        mock_val = 'description_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_snapshot_table_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.snapshot_table(
+            bigtable_table_admin.SnapshotTableRequest(),
+            name='name_value',
+            cluster='cluster_value',
+            snapshot_id='snapshot_id_value',
+            description='description_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.GetSnapshotRequest,
+  dict,
+])
+def test_get_snapshot(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_snapshot),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Snapshot(
+            name='name_value',
+            data_size_bytes=1594,
+            state=table.Snapshot.State.READY,
+            description='description_value',
+        )
+        response = client.get_snapshot(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.GetSnapshotRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, table.Snapshot)
+    assert response.name == 'name_value'
+    assert response.data_size_bytes == 1594
+    assert response.state == table.Snapshot.State.READY
+    assert response.description == 'description_value'
+
+
+def test_get_snapshot_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_snapshot),
+            '__call__') as call:
+        client.get_snapshot()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.GetSnapshotRequest()
+
+@pytest.mark.asyncio
+async def test_get_snapshot_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.GetSnapshotRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_snapshot),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot(
+            name='name_value',
+            data_size_bytes=1594,
+            state=table.Snapshot.State.READY,
+            description='description_value',
+        ))
+        response = await client.get_snapshot(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.GetSnapshotRequest()
+
+    # Establish that the response is the type that we expect.
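+    # (The scalar field assertions below check that the mocked Snapshot is
+    # surfaced unchanged.)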
+ assert isinstance(response, table.Snapshot) + assert response.name == 'name_value' + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == 'description_value' + + +@pytest.mark.asyncio +async def test_get_snapshot_async_from_dict(): + await test_get_snapshot_async(request_type=dict) + + +def test_get_snapshot_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSnapshotRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_snapshot), + '__call__') as call: + call.return_value = table.Snapshot() + client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_snapshot_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.GetSnapshotRequest() + + request.name = 'name_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_snapshot), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) + await client.get_snapshot(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'name=name_value', + ) in kw['metadata'] + + +def test_get_snapshot_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_snapshot), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = table.Snapshot() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_snapshot( + name='name_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].name + mock_val = 'name_value' + assert arg == mock_val + + +def test_get_snapshot_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+    with pytest.raises(ValueError):
+        client.get_snapshot(
+            bigtable_table_admin.GetSnapshotRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_snapshot_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_snapshot),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_snapshot(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_snapshot_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_snapshot(
+            bigtable_table_admin.GetSnapshotRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.ListSnapshotsRequest,
+  dict,
+])
+def test_list_snapshots(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.ListSnapshotsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_snapshots(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.ListSnapshotsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListSnapshotsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_snapshots_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__') as call:
+        client.list_snapshots()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.ListSnapshotsRequest()
+
+@pytest.mark.asyncio
+async def test_list_snapshots_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.ListSnapshotsRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListSnapshotsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_snapshots(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == bigtable_table_admin.ListSnapshotsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListSnapshotsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_snapshots_async_from_dict():
+    await test_list_snapshots_async(request_type=dict)
+
+
+def test_list_snapshots_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.ListSnapshotsRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__') as call:
+        call.return_value = bigtable_table_admin.ListSnapshotsResponse()
+        client.list_snapshots(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls) == 1
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_snapshots_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.ListSnapshotsRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListSnapshotsResponse())
+        await client.list_snapshots(request)
+
+    # Establish that the underlying gRPC stub method was called.
+    assert len(call.mock_calls)
+    _, args, _ = call.mock_calls[0]
+    assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_list_snapshots_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.ListSnapshotsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_snapshots(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+
+def test_list_snapshots_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_snapshots(
+            bigtable_table_admin.ListSnapshotsRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_snapshots_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListSnapshotsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_snapshots(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_snapshots_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_snapshots(
+            bigtable_table_admin.ListSnapshotsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_snapshots_pager(transport_name: str = "grpc"):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__') as call:
+        # Set the response to a series of pages.
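+        # (mock's side_effect yields one staged response per page fetch; the
+        # trailing RuntimeError is a tripwire in case the pager requests more
+        # pages than were staged.)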
+        call.side_effect = (
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                    table.Snapshot(),
+                    table.Snapshot(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                    table.Snapshot(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_snapshots(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, table.Snapshot)
+                   for i in results)
+
+
+def test_list_snapshots_pages(transport_name: str = "grpc"):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                    table.Snapshot(),
+                    table.Snapshot(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                    table.Snapshot(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_snapshots(request={}).pages)
+        for page_, token in zip(pages, ['abc','def','ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_snapshots_async_pager():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                    table.Snapshot(),
+                    table.Snapshot(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                    table.Snapshot(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_snapshots(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, table.Snapshot)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_snapshots_async_pages():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_snapshots),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
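+        # (The same four staged pages are replayed for the async pages
+        # iterator; iteration should stop at the final empty next_page_token,
+        # so the RuntimeError sentinel is never reached.)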
+        call.side_effect = (
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                    table.Snapshot(),
+                    table.Snapshot(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListSnapshotsResponse(
+                snapshots=[
+                    table.Snapshot(),
+                    table.Snapshot(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
+            await client.list_snapshots(request={})
+        ).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.DeleteSnapshotRequest,
+  dict,
+])
+def test_delete_snapshot(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_snapshot),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.delete_snapshot(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.DeleteSnapshotRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_delete_snapshot_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_snapshot),
+            '__call__') as call:
+        client.delete_snapshot()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.DeleteSnapshotRequest()
+
+@pytest.mark.asyncio
+async def test_delete_snapshot_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.DeleteSnapshotRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_snapshot),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        response = await client.delete_snapshot(request)
+
+        # Establish that the underlying gRPC stub method was called.
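+        # The first positional argument recorded by the mock is the request
+        # proto that actually reached the stub.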
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.DeleteSnapshotRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_snapshot_async_from_dict():
+    await test_delete_snapshot_async(request_type=dict)
+
+
+def test_delete_snapshot_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.DeleteSnapshotRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_snapshot),
+            '__call__') as call:
+        call.return_value = None
+        client.delete_snapshot(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_snapshot_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.DeleteSnapshotRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_snapshot),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.delete_snapshot(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_delete_snapshot_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_snapshot),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_snapshot(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_delete_snapshot_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
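+    # (The client cannot merge the two sources of fields, so it raises
+    # ValueError before any RPC is attempted.)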
+    with pytest.raises(ValueError):
+        client.delete_snapshot(
+            bigtable_table_admin.DeleteSnapshotRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_delete_snapshot_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_snapshot),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_snapshot(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_delete_snapshot_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_snapshot(
+            bigtable_table_admin.DeleteSnapshotRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.CreateBackupRequest,
+  dict,
+])
+def test_create_backup(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.create_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.CreateBackupRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_create_backup_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_backup),
+            '__call__') as call:
+        client.create_backup()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.CreateBackupRequest()
+
+@pytest.mark.asyncio
+async def test_create_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CreateBackupRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        response = await client.create_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.CreateBackupRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_create_backup_async_from_dict():
+    await test_create_backup_async(request_type=dict)
+
+
+def test_create_backup_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.CreateBackupRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_backup),
+            '__call__') as call:
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        client.create_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_create_backup_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.CreateBackupRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_backup),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.create_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
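+    # kw['metadata'] is the gRPC metadata sequence; the routing header value
+    # is derived from the populated request.parent field.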
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_create_backup_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.create_backup(
+            parent='parent_value',
+            backup_id='backup_id_value',
+            backup=table.Backup(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].backup_id
+        mock_val = 'backup_id_value'
+        assert arg == mock_val
+        arg = args[0].backup
+        mock_val = table.Backup(name='name_value')
+        assert arg == mock_val
+
+
+def test_create_backup_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.create_backup(
+            bigtable_table_admin.CreateBackupRequest(),
+            parent='parent_value',
+            backup_id='backup_id_value',
+            backup=table.Backup(name='name_value'),
+        )
+
+@pytest.mark.asyncio
+async def test_create_backup_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.create_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.create_backup(
+            parent='parent_value',
+            backup_id='backup_id_value',
+            backup=table.Backup(name='name_value'),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].backup_id
+        mock_val = 'backup_id_value'
+        assert arg == mock_val
+        arg = args[0].backup
+        mock_val = table.Backup(name='name_value')
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_create_backup_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.create_backup(
+            bigtable_table_admin.CreateBackupRequest(),
+            parent='parent_value',
+            backup_id='backup_id_value',
+            backup=table.Backup(name='name_value'),
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.GetBackupRequest,
+  dict,
+])
+def test_get_backup(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Backup(
+            name='name_value',
+            source_table='source_table_value',
+            source_backup='source_backup_value',
+            size_bytes=1089,
+            state=table.Backup.State.CREATING,
+        )
+        response = client.get_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.GetBackupRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, table.Backup)
+    assert response.name == 'name_value'
+    assert response.source_table == 'source_table_value'
+    assert response.source_backup == 'source_backup_value'
+    assert response.size_bytes == 1089
+    assert response.state == table.Backup.State.CREATING
+
+
+def test_get_backup_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_backup),
+            '__call__') as call:
+        client.get_backup()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.GetBackupRequest()
+
+@pytest.mark.asyncio
+async def test_get_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.GetBackupRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup(
+            name='name_value',
+            source_table='source_table_value',
+            source_backup='source_backup_value',
+            size_bytes=1089,
+            state=table.Backup.State.CREATING,
+        ))
+        response = await client.get_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.GetBackupRequest()
+
+    # Establish that the response is the type that we expect.
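+    # Awaiting the FakeUnaryUnaryCall above resolves to the designated
+    # Backup, so plain attribute assertions apply to the response.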
+    assert isinstance(response, table.Backup)
+    assert response.name == 'name_value'
+    assert response.source_table == 'source_table_value'
+    assert response.source_backup == 'source_backup_value'
+    assert response.size_bytes == 1089
+    assert response.state == table.Backup.State.CREATING
+
+
+@pytest.mark.asyncio
+async def test_get_backup_async_from_dict():
+    await test_get_backup_async(request_type=dict)
+
+
+def test_get_backup_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.GetBackupRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_backup),
+            '__call__') as call:
+        call.return_value = table.Backup()
+        client.get_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_get_backup_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.GetBackupRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_backup),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup())
+        await client.get_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_get_backup_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Backup()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.get_backup(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_get_backup_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_backup(
+            bigtable_table_admin.GetBackupRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_backup_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_backup(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_backup_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_backup(
+            bigtable_table_admin.GetBackupRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.UpdateBackupRequest,
+  dict,
+])
+def test_update_backup(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Backup(
+            name='name_value',
+            source_table='source_table_value',
+            source_backup='source_backup_value',
+            size_bytes=1089,
+            state=table.Backup.State.CREATING,
+        )
+        response = client.update_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.UpdateBackupRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, table.Backup)
+    assert response.name == 'name_value'
+    assert response.source_table == 'source_table_value'
+    assert response.source_backup == 'source_backup_value'
+    assert response.size_bytes == 1089
+    assert response.state == table.Backup.State.CREATING
+
+
+def test_update_backup_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_backup),
+            '__call__') as call:
+        client.update_backup()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.UpdateBackupRequest()
+
+@pytest.mark.asyncio
+async def test_update_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.UpdateBackupRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup(
+            name='name_value',
+            source_table='source_table_value',
+            source_backup='source_backup_value',
+            size_bytes=1089,
+            state=table.Backup.State.CREATING,
+        ))
+        response = await client.update_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.UpdateBackupRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, table.Backup)
+    assert response.name == 'name_value'
+    assert response.source_table == 'source_table_value'
+    assert response.source_backup == 'source_backup_value'
+    assert response.size_bytes == 1089
+    assert response.state == table.Backup.State.CREATING
+
+
+@pytest.mark.asyncio
+async def test_update_backup_async_from_dict():
+    await test_update_backup_async(request_type=dict)
+
+
+def test_update_backup_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.UpdateBackupRequest()
+
+    request.backup.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_backup),
+            '__call__') as call:
+        call.return_value = table.Backup()
+        client.update_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'backup.name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_update_backup_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.UpdateBackupRequest()
+
+    request.backup.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_backup),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup())
+        await client.update_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
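+        # For UpdateBackup the routing parameter comes from the nested
+        # backup.name field, hence the 'backup.name=name_value' header
+        # asserted below.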
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'backup.name=name_value',
+    ) in kw['metadata']
+
+
+def test_update_backup_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = table.Backup()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.update_backup(
+            backup=table.Backup(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].backup
+        mock_val = table.Backup(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+
+def test_update_backup_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.update_backup(
+            bigtable_table_admin.UpdateBackupRequest(),
+            backup=table.Backup(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+@pytest.mark.asyncio
+async def test_update_backup_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.update_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.update_backup(
+            backup=table.Backup(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].backup
+        mock_val = table.Backup(name='name_value')
+        assert arg == mock_val
+        arg = args[0].update_mask
+        mock_val = field_mask_pb2.FieldMask(paths=['paths_value'])
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_update_backup_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.update_backup(
+            bigtable_table_admin.UpdateBackupRequest(),
+            backup=table.Backup(name='name_value'),
+            update_mask=field_mask_pb2.FieldMask(paths=['paths_value']),
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.DeleteBackupRequest,
+  dict,
+])
+def test_delete_backup(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        response = client.delete_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.DeleteBackupRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+def test_delete_backup_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_backup),
+            '__call__') as call:
+        client.delete_backup()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.DeleteBackupRequest()
+
+@pytest.mark.asyncio
+async def test_delete_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.DeleteBackupRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        response = await client.delete_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.DeleteBackupRequest()
+
+    # Establish that the response is the type that we expect.
+    assert response is None
+
+
+@pytest.mark.asyncio
+async def test_delete_backup_async_from_dict():
+    await test_delete_backup_async(request_type=dict)
+
+
+def test_delete_backup_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.DeleteBackupRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_backup),
+            '__call__') as call:
+        call.return_value = None
+        client.delete_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_delete_backup_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.DeleteBackupRequest()
+
+    request.name = 'name_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_backup),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        await client.delete_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'name=name_value',
+    ) in kw['metadata']
+
+
+def test_delete_backup_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = None
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.delete_backup(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+
+def test_delete_backup_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.delete_backup(
+            bigtable_table_admin.DeleteBackupRequest(),
+            name='name_value',
+        )
+
+@pytest.mark.asyncio
+async def test_delete_backup_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.delete_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.delete_backup(
+            name='name_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].name
+        mock_val = 'name_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_delete_backup_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.delete_backup(
+            bigtable_table_admin.DeleteBackupRequest(),
+            name='name_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.ListBackupsRequest,
+  dict,
+])
+def test_list_backups(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.ListBackupsResponse(
+            next_page_token='next_page_token_value',
+        )
+        response = client.list_backups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.ListBackupsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, pagers.ListBackupsPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+def test_list_backups_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__') as call:
+        client.list_backups()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.ListBackupsRequest()
+
+@pytest.mark.asyncio
+async def test_list_backups_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.ListBackupsRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListBackupsResponse(
+            next_page_token='next_page_token_value',
+        ))
+        response = await client.list_backups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.ListBackupsRequest()
+
+    # Establish that the response is the type that we expect.
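+    # list_backups surfaces an async pager that wraps the raw response;
+    # next_page_token mirrors the underlying proto field.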
+    assert isinstance(response, pagers.ListBackupsAsyncPager)
+    assert response.next_page_token == 'next_page_token_value'
+
+
+@pytest.mark.asyncio
+async def test_list_backups_async_from_dict():
+    await test_list_backups_async(request_type=dict)
+
+
+def test_list_backups_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.ListBackupsRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__') as call:
+        call.return_value = bigtable_table_admin.ListBackupsResponse()
+        client.list_backups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_list_backups_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.ListBackupsRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListBackupsResponse())
+        await client.list_backups(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_list_backups_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = bigtable_table_admin.ListBackupsResponse()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.list_backups(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+
+def test_list_backups_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.list_backups(
+            bigtable_table_admin.ListBackupsRequest(),
+            parent='parent_value',
+        )
+
+@pytest.mark.asyncio
+async def test_list_backups_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListBackupsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.list_backups(
+            parent='parent_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_list_backups_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.list_backups(
+            bigtable_table_admin.ListBackupsRequest(),
+            parent='parent_value',
+        )
+
+
+def test_list_backups_pager(transport_name: str = "grpc"):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__') as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                    table.Backup(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                ],
+            ),
+            RuntimeError,
+        )
+
+        metadata = ()
+        metadata = tuple(metadata) + (
+            gapic_v1.routing_header.to_grpc_metadata((
+                ('parent', ''),
+            )),
+        )
+        pager = client.list_backups(request={})
+
+        assert pager._metadata == metadata
+
+        results = list(pager)
+        assert len(results) == 6
+        assert all(isinstance(i, table.Backup)
+                   for i in results)
+
+
+def test_list_backups_pages(transport_name: str = "grpc"):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport_name,
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__') as call:
+        # Set the response to a series of pages.
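+        # Four pages are queued: tokens 'abc', 'def', 'ghi', then a final
+        # page with an empty token, which ends iteration.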
+        call.side_effect = (
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                    table.Backup(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = list(client.list_backups(request={}).pages)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.asyncio
+async def test_list_backups_async_pager():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                    table.Backup(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                ],
+            ),
+            RuntimeError,
+        )
+        async_pager = await client.list_backups(request={},)
+        assert async_pager.next_page_token == 'abc'
+        responses = []
+        async for response in async_pager: # pragma: no branch
+            responses.append(response)
+
+        assert len(responses) == 6
+        assert all(isinstance(i, table.Backup)
+                   for i in responses)
+
+
+@pytest.mark.asyncio
+async def test_list_backups_async_pages():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.list_backups),
+            '__call__', new_callable=mock.AsyncMock) as call:
+        # Set the response to a series of pages.
+        call.side_effect = (
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                    table.Backup(),
+                ],
+                next_page_token='abc',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[],
+                next_page_token='def',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                ],
+                next_page_token='ghi',
+            ),
+            bigtable_table_admin.ListBackupsResponse(
+                backups=[
+                    table.Backup(),
+                    table.Backup(),
+                ],
+            ),
+            RuntimeError,
+        )
+        pages = []
+        # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch`
+        # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372
+        async for page_ in ( # pragma: no branch
+            await client.list_backups(request={})
+        ).pages:
+            pages.append(page_)
+        for page_, token in zip(pages, ['abc', 'def', 'ghi', '']):
+            assert page_.raw_page.next_page_token == token
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.RestoreTableRequest,
+  dict,
+])
+def test_restore_table(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.restore_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.restore_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.RestoreTableRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_restore_table_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.restore_table),
+            '__call__') as call:
+        client.restore_table()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.RestoreTableRequest()
+
+@pytest.mark.asyncio
+async def test_restore_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.RestoreTableRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.restore_table),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        response = await client.restore_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
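+        # restore_table is a long-running operation: the asserts below check
+        # the request that reached the stub, and that the response surfaced
+        # as an operation future rather than a raw Operation proto.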
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.RestoreTableRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+@pytest.mark.asyncio
+async def test_restore_table_async_from_dict():
+    await test_restore_table_async(request_type=dict)
+
+
+def test_restore_table_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.RestoreTableRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.restore_table),
+            '__call__') as call:
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        client.restore_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_restore_table_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = bigtable_table_admin.RestoreTableRequest()
+
+    request.parent = 'parent_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.restore_table),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op'))
+        await client.restore_table(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.CopyBackupRequest,
+  dict,
+])
+def test_copy_backup(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.copy_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/spam')
+        response = client.copy_backup(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == bigtable_table_admin.CopyBackupRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, future.Future)
+
+
+def test_copy_backup_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.copy_backup), + '__call__') as call: + client.copy_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + +@pytest.mark.asyncio +async def test_copy_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CopyBackupRequest): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.copy_backup), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name='operations/spam') + ) + response = await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_copy_backup_async_from_dict(): + await test_copy_backup_async(request_type=dict) + + +def test_copy_backup_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.copy_backup), + '__call__') as call: + call.return_value = operations_pb2.Operation(name='operations/op') + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'parent=parent_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_copy_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = 'parent_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.copy_backup), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) + await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. 
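+    # (Routing parameters travel in the x-goog-request-params metadata
+    # entry rather than on the request message itself.)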
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'parent=parent_value',
+    ) in kw['metadata']
+
+
+def test_copy_backup_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.copy_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = operations_pb2.Operation(name='operations/op')
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.copy_backup(
+            parent='parent_value',
+            backup_id='backup_id_value',
+            source_backup='source_backup_value',
+            expire_time=timestamp_pb2.Timestamp(seconds=751),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].backup_id
+        mock_val = 'backup_id_value'
+        assert arg == mock_val
+        arg = args[0].source_backup
+        mock_val = 'source_backup_value'
+        assert arg == mock_val
+        assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp(seconds=751)
+
+
+def test_copy_backup_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.copy_backup(
+            bigtable_table_admin.CopyBackupRequest(),
+            parent='parent_value',
+            backup_id='backup_id_value',
+            source_backup='source_backup_value',
+            expire_time=timestamp_pb2.Timestamp(seconds=751),
+        )
+
+@pytest.mark.asyncio
+async def test_copy_backup_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.copy_backup),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
+            operations_pb2.Operation(name='operations/spam')
+        )
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.copy_backup(
+            parent='parent_value',
+            backup_id='backup_id_value',
+            source_backup='source_backup_value',
+            expire_time=timestamp_pb2.Timestamp(seconds=751),
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].parent
+        mock_val = 'parent_value'
+        assert arg == mock_val
+        arg = args[0].backup_id
+        mock_val = 'backup_id_value'
+        assert arg == mock_val
+        arg = args[0].source_backup
+        mock_val = 'source_backup_value'
+        assert arg == mock_val
+        assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp(seconds=751)
+
+@pytest.mark.asyncio
+async def test_copy_backup_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.copy_backup(
+            bigtable_table_admin.CopyBackupRequest(),
+            parent='parent_value',
+            backup_id='backup_id_value',
+            source_backup='source_backup_value',
+            expire_time=timestamp_pb2.Timestamp(seconds=751),
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  iam_policy_pb2.GetIamPolicyRequest,
+  dict,
+])
+def test_get_iam_policy(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_iam_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(
+            version=774,
+            etag=b'etag_blob',
+        )
+        response = client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+    assert response.version == 774
+    assert response.etag == b'etag_blob'
+
+
+def test_get_iam_policy_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_iam_policy),
+            '__call__') as call:
+        client.get_iam_policy()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.GetIamPolicyRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_iam_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy(
+            version=774,
+            etag=b'etag_blob',
+        ))
+        response = await client.get_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == iam_policy_pb2.GetIamPolicyRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b'etag_blob' + + +@pytest.mark.asyncio +async def test_get_iam_policy_async_from_dict(): + await test_get_iam_policy_async(request_type=dict) + + +def test_get_iam_policy_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + + request.resource = 'resource_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), + '__call__') as call: + call.return_value = policy_pb2.Policy() + client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'resource=resource_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_get_iam_policy_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.GetIamPolicyRequest() + + request.resource = 'resource_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) + await client.get_iam_policy(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'resource=resource_value', + ) in kw['metadata'] + +def test_get_iam_policy_from_dict_foreign(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + response = client.get_iam_policy(request={ + 'resource': 'resource_value', + 'options': options_pb2.GetPolicyOptions(requested_policy_version=2598), + } + ) + call.assert_called() + + +def test_get_iam_policy_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.get_iam_policy), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = policy_pb2.Policy() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.get_iam_policy( + resource='resource_value', + ) + + # Establish that the underlying call was made with the expected + # request object values. 
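+        # (Each flattened keyword argument should surface as the matching
+        # field on the request message captured by the mock.)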
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = 'resource_value'
+        assert arg == mock_val
+
+
+def test_get_iam_policy_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.get_iam_policy(
+            iam_policy_pb2.GetIamPolicyRequest(),
+            resource='resource_value',
+        )
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.get_iam_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.get_iam_policy(
+            resource='resource_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = 'resource_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_get_iam_policy_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.get_iam_policy(
+            iam_policy_pb2.GetIamPolicyRequest(),
+            resource='resource_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  iam_policy_pb2.SetIamPolicyRequest,
+  dict,
+])
+def test_set_iam_policy(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_iam_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy(
+            version=774,
+            etag=b'etag_blob',
+        )
+        response = client.set_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+    assert response.version == 774
+    assert response.etag == b'etag_blob'
+
+
+def test_set_iam_policy_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_iam_policy),
+            '__call__') as call:
+        client.set_iam_policy()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.SetIamPolicyRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_iam_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy(
+            version=774,
+            etag=b'etag_blob',
+        ))
+        response = await client.set_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == iam_policy_pb2.SetIamPolicyRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, policy_pb2.Policy)
+    assert response.version == 774
+    assert response.etag == b'etag_blob'
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_async_from_dict():
+    await test_set_iam_policy_async(request_type=dict)
+
+
+def test_set_iam_policy_field_headers():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    request.resource = 'resource_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_iam_policy),
+            '__call__') as call:
+        call.return_value = policy_pb2.Policy()
+        client.set_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'resource=resource_value',
+    ) in kw['metadata']
+
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_field_headers_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Any value that is part of the HTTP/1.1 URI should be sent as
+    # a field header. Set these to a non-empty value.
+    request = iam_policy_pb2.SetIamPolicyRequest()
+
+    request.resource = 'resource_value'
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_iam_policy),
+            '__call__') as call:
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+        await client.set_iam_policy(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == request
+
+    # Establish that the field header was sent.
+    _, _, kw = call.mock_calls[0]
+    assert (
+        'x-goog-request-params',
+        'resource=resource_value',
+    ) in kw['metadata']
+
+def test_set_iam_policy_from_dict_foreign():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_iam_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy()
+        response = client.set_iam_policy(request={
+            'resource': 'resource_value',
+            'policy': policy_pb2.Policy(version=774),
+            'update_mask': field_mask_pb2.FieldMask(paths=['paths_value']),
+            }
+        )
+        call.assert_called()
+
+
+def test_set_iam_policy_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_iam_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = policy_pb2.Policy()
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        client.set_iam_policy(
+            resource='resource_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = 'resource_value'
+        assert arg == mock_val
+
+
+def test_set_iam_policy_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.set_iam_policy(
+            iam_policy_pb2.SetIamPolicyRequest(),
+            resource='resource_value',
+        )
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.set_iam_policy),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.set_iam_policy(
+            resource='resource_value',
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = 'resource_value'
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_set_iam_policy_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.set_iam_policy(
+            iam_policy_pb2.SetIamPolicyRequest(),
+            resource='resource_value',
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  iam_policy_pb2.TestIamPermissionsRequest,
+  dict,
+])
+def test_test_iam_permissions(request_type, transport: str = 'grpc'):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.test_iam_permissions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = iam_policy_pb2.TestIamPermissionsResponse(
+            permissions=['permissions_value'],
+        )
+        response = client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
+
+    # Establish that the response is the type that we expect.
+    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
+    assert response.permissions == ['permissions_value']
+
+
+def test_test_iam_permissions_empty_call():
+    # This test is a coverage failsafe to make sure that totally empty calls,
+    # i.e. request == None and no flattened fields passed, work.
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport='grpc',
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.test_iam_permissions),
+            '__call__') as call:
+        client.test_iam_permissions()
+        call.assert_called()
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.TestIamPermissionsRequest):
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # Everything is optional in proto3 as far as the runtime is concerned,
+    # and we are mocking out the actual API, so just send an empty request.
+    request = request_type()
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.test_iam_permissions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse(
+            permissions=['permissions_value'],
+        ))
+        response = await client.test_iam_permissions(request)
+
+        # Establish that the underlying gRPC stub method was called.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        assert args[0] == iam_policy_pb2.TestIamPermissionsRequest()
+
+    # Establish that the response is the type that we expect.
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_async_from_dict(): + await test_test_iam_permissions_async(request_type=dict) + + +def test_test_iam_permissions_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + + request.resource = 'resource_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), + '__call__') as call: + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'resource=resource_value', + ) in kw['metadata'] + + +@pytest.mark.asyncio +async def test_test_iam_permissions_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = iam_policy_pb2.TestIamPermissionsRequest() + + request.resource = 'resource_value' + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), + '__call__') as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse()) + await client.test_iam_permissions(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + 'x-goog-request-params', + 'resource=resource_value', + ) in kw['metadata'] + +def test_test_iam_permissions_from_dict_foreign(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + response = client.test_iam_permissions(request={ + 'resource': 'resource_value', + 'permissions': ['permissions_value'], + } + ) + call.assert_called() + + +def test_test_iam_permissions_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object( + type(client.transport.test_iam_permissions), + '__call__') as call: + # Designate an appropriate return value for the call. + call.return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. 
+        client.test_iam_permissions(
+            resource='resource_value',
+            permissions=['permissions_value'],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls) == 1
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = 'resource_value'
+        assert arg == mock_val
+        arg = args[0].permissions
+        mock_val = ['permissions_value']
+        assert arg == mock_val
+
+
+def test_test_iam_permissions_flattened_error():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        client.test_iam_permissions(
+            iam_policy_pb2.TestIamPermissionsRequest(),
+            resource='resource_value',
+            permissions=['permissions_value'],
+        )
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_flattened_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Mock the actual call within the gRPC stub, and fake the request.
+    with mock.patch.object(
+            type(client.transport.test_iam_permissions),
+            '__call__') as call:
+        # Designate an appropriate return value for the call.
+        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse())
+        # Call the method with a truthy value for each flattened field,
+        # using the keyword arguments to the method.
+        response = await client.test_iam_permissions(
+            resource='resource_value',
+            permissions=['permissions_value'],
+        )
+
+        # Establish that the underlying call was made with the expected
+        # request object values.
+        assert len(call.mock_calls)
+        _, args, _ = call.mock_calls[0]
+        arg = args[0].resource
+        mock_val = 'resource_value'
+        assert arg == mock_val
+        arg = args[0].permissions
+        mock_val = ['permissions_value']
+        assert arg == mock_val
+
+@pytest.mark.asyncio
+async def test_test_iam_permissions_flattened_error_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+    )
+
+    # Attempting to call a method with both a request object and flattened
+    # fields is an error.
+    with pytest.raises(ValueError):
+        await client.test_iam_permissions(
+            iam_policy_pb2.TestIamPermissionsRequest(),
+            resource='resource_value',
+            permissions=['permissions_value'],
+        )
+
+
+@pytest.mark.parametrize("request_type", [
+  bigtable_table_admin.CreateTableRequest,
+  dict,
+])
+def test_create_table_rest(request_type):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), 'request') as req:
+        # Designate an appropriate value for the returned response.
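+        # (REST tests fake the HTTP layer: the expected protobuf response is
+        # serialized to JSON and attached to a mocked requests.Response.)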
+ return_value = gba_table.Table( + name='name_value', + granularity=gba_table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_table(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, gba_table.Table) + assert response.name == 'name_value' + assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +def test_create_table_rest_required_fields(request_type=bigtable_table_admin.CreateTableRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + jsonified_request["tableId"] = 'table_id_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == 'table_id_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = gba_table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
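+            # (The hand-built transcode result below stands in for what
+            # path_template.transcode would produce for a POST with a body.)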
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = gba_table.Table.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.create_table(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_create_table_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.create_table._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("parent", "tableId", "table", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_table_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+        )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_create_table") as post, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_create_table") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.CreateTableRequest.pb(bigtable_table_admin.CreateTableRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = gba_table.Table.to_json(gba_table.Table())
+
+        request = bigtable_table_admin.CreateTableRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = gba_table.Table()
+
+        client.create_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_create_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CreateTableRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_table(request) + + +def test_create_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = gba_table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + table_id='table_id_value', + table=gba_table.Table(name='name_value'), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, args[1]) + + +def test_create_table_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_table( + bigtable_table_admin.CreateTableRequest(), + parent='parent_value', + table_id='table_id_value', + table=gba_table.Table(name='name_value'), + ) + + +def test_create_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.CreateTableFromSnapshotRequest, + dict, +]) +def test_create_table_from_snapshot_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_table_from_snapshot(request) + + # Establish that the response is the type that we expect. 
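+    # (Long-running methods return an operation future; the raw Operation
+    # proto is reachable via response.operation.)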
+ assert response.operation.name == "operations/spam" + + +def test_create_table_from_snapshot_rest_required_fields(request_type=bigtable_table_admin.CreateTableFromSnapshotRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request_init["source_snapshot"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + jsonified_request["tableId"] = 'table_id_value' + jsonified_request["sourceSnapshot"] = 'source_snapshot_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_table_from_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == 'table_id_value' + assert "sourceSnapshot" in jsonified_request + assert jsonified_request["sourceSnapshot"] == 'source_snapshot_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.create_table_from_snapshot(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_create_table_from_snapshot_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("parent", "tableId", "sourceSnapshot", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_table_from_snapshot_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+        )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot") as post, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(bigtable_table_admin.CreateTableFromSnapshotRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = bigtable_table_admin.CreateTableFromSnapshotRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.create_table_from_snapshot(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_create_table_from_snapshot_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CreateTableFromSnapshotRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_table_from_snapshot(request) + + +def test_create_table_from_snapshot_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + table_id='table_id_value', + source_snapshot='source_snapshot_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_table_from_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" % client.transport._host, args[1]) + + +def test_create_table_from_snapshot_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_table_from_snapshot( + bigtable_table_admin.CreateTableFromSnapshotRequest(), + parent='parent_value', + table_id='table_id_value', + source_snapshot='source_snapshot_value', + ) + + +def test_create_table_from_snapshot_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.ListTablesRequest, + dict, +]) +def test_list_tables_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_table_admin.ListTablesResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_tables(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, pagers.ListTablesPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_tables_rest_required_fields(request_type=bigtable_table_admin.ListTablesRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_tables._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_tables._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_size", "page_token", "view", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable_table_admin.ListTablesResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.list_tables(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_list_tables_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.list_tables._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("pageSize", "pageToken", "view", )) & set(("parent", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_tables_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+        )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_list_tables") as post, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_list_tables") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.ListTablesRequest.pb(bigtable_table_admin.ListTablesRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable_table_admin.ListTablesResponse.to_json(bigtable_table_admin.ListTablesResponse())
+
+        request = bigtable_table_admin.ListTablesRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable_table_admin.ListTablesResponse()
+
+        client.list_tables(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_list_tables_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.ListTablesRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_tables(request) + + +def test_list_tables_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListTablesResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_tables(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, args[1]) + + +def test_list_tables_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_tables( + bigtable_table_admin.ListTablesRequest(), + parent='parent_value', + ) + + +def test_list_tables_rest_pager(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + table.Table(), + ], + next_page_token='abc', + ), + bigtable_table_admin.ListTablesResponse( + tables=[], + next_page_token='def', + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + ], + next_page_token='ghi', + ), + bigtable_table_admin.ListTablesResponse( + tables=[ + table.Table(), + table.Table(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(bigtable_table_admin.ListTablesResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/instances/sample2'} + + pager = client.list_tables(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Table) + for i in results) + + pages = list(client.list_tables(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.GetTableRequest, + dict, +]) +def test_get_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = table.Table( + name='name_value', + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_table(request) + + # Establish that the response is the type that we expect. 
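+    # Every scalar set on the mocked Table above must survive the
+    # proto -> JSON -> proto round trip performed by the REST transport.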
+ assert isinstance(response, table.Table) + assert response.name == 'name_value' + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +def test_get_table_rest_required_fields(request_type=bigtable_table_admin.GetTableRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("view", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "get", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.get_table(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_get_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.get_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(("view", )) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_get_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_get_table") as post, \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_get_table") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.GetTableRequest.pb(bigtable_table_admin.GetTableRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = table.Table.to_json(table.Table()) + + request = bigtable_table_admin.GetTableRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Table() + + client.get_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_get_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.GetTableRequest): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_table(request) + + +def test_get_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. 
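+    # The flattened 'name' argument must be expanded into the URL path;
+    # path_template.validate() below checks the request URI against the
+    # v2 HTTP rule for GetTable.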
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, args[1]) + + +def test_get_table_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_table( + bigtable_table_admin.GetTableRequest(), + name='name_value', + ) + + +def test_get_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.UpdateTableRequest, + dict, +]) +def test_update_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'table': {'name': 'projects/sample1/instances/sample2/tables/sample3'}} + request_init["table"] = {'name': 'projects/sample1/instances/sample2/tables/sample3', 'cluster_states': {}, 'column_families': {}, 'granularity': 1, 'restore_info': {'source_type': 1, 'backup_info': {'backup': 'backup_value', 'start_time': {'seconds': 751, 'nanos': 543}, 'end_time': {}, 'source_table': 'source_table_value', 'source_backup': 'source_backup_value'}}, 'change_stream_config': {'retention_period': {'seconds': 751, 'nanos': 543}}, 'deletion_protection': True} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
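+        # proto-plus message classes expose their fields via .meta.fields, while
+        # vanilla protobuf classes expose a DESCRIPTOR; the hasattr check below
+        # tells the two apart at runtime.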
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["table"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["table"][field])): + del request_init["table"][field][i][subfield] + else: + del request_init["table"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_table(request) + + # Establish that the response is the type that we expect. 
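+    # UpdateTable is a long-running operation, so the client wraps the raw
+    # operations_pb2.Operation; only its resource name is checked here.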
+ assert response.operation.name == "operations/spam" + + +def test_update_table_rest_required_fields(request_type=bigtable_table_admin.UpdateTableRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_table._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
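+            # UpdateTable transcodes to PATCH with the Table carried in the request
+            # body, so unlike the GET stubs earlier this one also sets 'body'.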
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "patch", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.update_table(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_update_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.update_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(("updateMask", )) & set(("table", "updateMask", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_update_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_update_table") as post, \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_update_table") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UpdateTableRequest.pb(bigtable_table_admin.UpdateTableRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = bigtable_table_admin.UpdateTableRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.update_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_update_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.UpdateTableRequest): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'table': {'name': 'projects/sample1/instances/sample2/tables/sample3'}} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_table(request) + + +def test_update_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'table': {'name': 'projects/sample1/instances/sample2/tables/sample3'}} + + # get truthy value for each flattened field + mock_args = dict( + table=gba_table.Table(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{table.name=projects/*/instances/*/tables/*}" % client.transport._host, args[1]) + + +def test_update_table_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_table( + bigtable_table_admin.UpdateTableRequest(), + table=gba_table.Table(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_update_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.DeleteTableRequest, + dict, +]) +def test_delete_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_table(request) + + # Establish that the response is the type that we expect. 
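+    # DeleteTable returns google.protobuf.Empty, which the generated client
+    # surfaces as None; the mocked body above is the empty string to match.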
+ assert response is None + + +def test_delete_table_rest_required_fields(request_type=bigtable_table_admin.DeleteTableRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "delete", + 'query_params': pb_request, + } + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.delete_table(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_delete_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.delete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_delete_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_delete_table") as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DeleteTableRequest.pb(bigtable_table_admin.DeleteTableRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_table_admin.DeleteTableRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.delete_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + + +def test_delete_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.DeleteTableRequest): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.delete_table(request) + + +def test_delete_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
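+        # For DeleteTable the flattened call issues an HTTP DELETE with no
+        # payload; the empty JSON body below stands in for google.protobuf.Empty.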
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, args[1]) + + +def test_delete_table_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_table( + bigtable_table_admin.DeleteTableRequest(), + name='name_value', + ) + + +def test_delete_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.UndeleteTableRequest, + dict, +]) +def test_undelete_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.undelete_table(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_undelete_table_rest_required_fields(request_type=bigtable_table_admin.UndeleteTableRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undelete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).undelete_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.undelete_table(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_undelete_table_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.undelete_table._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_undelete_table_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(operation.Operation, "_set_result_from_operation"), \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_undelete_table") as post, \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_undelete_table") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.UndeleteTableRequest.pb(bigtable_table_admin.UndeleteTableRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) + + request = bigtable_table_admin.UndeleteTableRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.undelete_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_undelete_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.UndeleteTableRequest): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.undelete_table(request) + + +def test_undelete_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.undelete_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:undelete" % client.transport._host, args[1]) + + +def test_undelete_table_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.undelete_table( + bigtable_table_admin.UndeleteTableRequest(), + name='name_value', + ) + + +def test_undelete_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.ModifyColumnFamiliesRequest, + dict, +]) +def test_modify_column_families_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = table.Table( + name='name_value', + granularity=table.Table.TimestampGranularity.MILLIS, + deletion_protection=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.modify_column_families(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, table.Table) + assert response.name == 'name_value' + assert response.granularity == table.Table.TimestampGranularity.MILLIS + assert response.deletion_protection is True + + +def test_modify_column_families_rest_required_fields(request_type=bigtable_table_admin.ModifyColumnFamiliesRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).modify_column_families._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).modify_column_families._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Table() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.modify_column_families(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_modify_column_families_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.modify_column_families._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", "modifications", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_modify_column_families_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_modify_column_families") as post, \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_modify_column_families") as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(bigtable_table_admin.ModifyColumnFamiliesRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = table.Table.to_json(table.Table()) + + request = bigtable_table_admin.ModifyColumnFamiliesRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = table.Table() + + client.modify_column_families(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + post.assert_called_once() + + +def test_modify_column_families_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.ModifyColumnFamiliesRequest): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.modify_column_families(request) + + +def test_modify_column_families_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = table.Table() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.modify_column_families(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" % client.transport._host, args[1]) + + +def test_modify_column_families_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.modify_column_families( + bigtable_table_admin.ModifyColumnFamiliesRequest(), + name='name_value', + modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')], + ) + + +def test_modify_column_families_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.DropRowRangeRequest, + dict, +]) +def test_drop_row_range_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.drop_row_range(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_drop_row_range_rest_required_fields(request_type=bigtable_table_admin.DropRowRangeRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).drop_row_range._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).drop_row_range._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + 'uri': 'v1/sample_method', + 'method': "post", + 'query_params': pb_request, + } + transcode_result['body'] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + response = client.drop_row_range(request) + + expected_params = [ + ('$alt', 'json;enum-encoding=int') + ] + actual_params = req.call_args.kwargs['params'] + assert expected_params == actual_params + + +def test_drop_row_range_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials) + + unset_fields = transport.drop_row_range._get_unset_required_fields({}) + assert set(unset_fields) == (set(()) & set(("name", ))) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_drop_row_range_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object(type(client.transport._session), "request") as req, \ + mock.patch.object(path_template, "transcode") as transcode, \ + mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range") as pre: + pre.assert_not_called() + pb_message = bigtable_table_admin.DropRowRangeRequest.pb(bigtable_table_admin.DropRowRangeRequest()) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + + request = bigtable_table_admin.DropRowRangeRequest() + metadata =[ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + + client.drop_row_range(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) + + pre.assert_called_once() + + +def test_drop_row_range_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.DropRowRangeRequest): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.drop_row_range(request) + + +def test_drop_row_range_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.GenerateConsistencyTokenRequest, + dict, +]) +def test_generate_consistency_token_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( + consistency_token='consistency_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.generate_consistency_token(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) + assert response.consistency_token == 'consistency_token_value' + + +def test_generate_consistency_token_rest_required_fields(request_type=bigtable_table_admin.GenerateConsistencyTokenRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).generate_consistency_token._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).generate_consistency_token._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + # Mock the http request call within the method and fake a response. 
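+    # GenerateConsistencyToken transcodes to a POST carrying the request in the
+    # body; 'name' is its only required field, as the assertions above verify.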
+ with mock.patch.object(Session, 'request') as req:
+ # We need to mock transcode() because providing default values
+ # for required fields will fail the real version if the http_options
+ # expect actual values for those fields.
+ with mock.patch.object(path_template, 'transcode') as transcode:
+ # A uri without fields and an empty body will force all the
+ # request fields to show up in the query_params.
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "post",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.generate_consistency_token(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_generate_consistency_token_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.generate_consistency_token._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_generate_consistency_token_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token") as post, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb(bigtable_table_admin.GenerateConsistencyTokenRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json(bigtable_table_admin.GenerateConsistencyTokenResponse())
+
+ request = bigtable_table_admin.GenerateConsistencyTokenRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse()
+
+ client.generate_consistency_token(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_generate_consistency_token_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.GenerateConsistencyTokenRequest):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+
# send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. + with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.generate_consistency_token(request) + + +def test_generate_consistency_token_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.generate_consistency_token(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" % client.transport._host, args[1]) + + +def test_generate_consistency_token_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.generate_consistency_token( + bigtable_table_admin.GenerateConsistencyTokenRequest(), + name='name_value', + ) + + +def test_generate_consistency_token_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.CheckConsistencyRequest, + dict, +]) +def test_check_consistency_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
+ return_value = bigtable_table_admin.CheckConsistencyResponse( + consistent=True, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.check_consistency(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) + assert response.consistent is True + + +def test_check_consistency_rest_required_fields(request_type=bigtable_table_admin.CheckConsistencyRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request_init["consistency_token"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).check_consistency._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + jsonified_request["consistencyToken"] = 'consistency_token_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).check_consistency._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + assert "consistencyToken" in jsonified_request + assert jsonified_request["consistencyToken"] == 'consistency_token_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.CheckConsistencyResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
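+ # The transcode_result dict below mirrors the shape returned by the real
+ # path_template.transcode(): 'uri', 'method', and 'query_params' keys, plus
+ # an optional 'body' for methods that send one.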
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "post",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.check_consistency(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_check_consistency_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.check_consistency._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name", "consistencyToken", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_check_consistency_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_check_consistency") as post, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_check_consistency") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = bigtable_table_admin.CheckConsistencyRequest.pb(bigtable_table_admin.CheckConsistencyRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = bigtable_table_admin.CheckConsistencyResponse.to_json(bigtable_table_admin.CheckConsistencyResponse())
+
+ request = bigtable_table_admin.CheckConsistencyRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = bigtable_table_admin.CheckConsistencyResponse()
+
+ client.check_consistency(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_check_consistency_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CheckConsistencyRequest):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.check_consistency(request) + + +def test_check_consistency_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.CheckConsistencyResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + consistency_token='consistency_token_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.check_consistency(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" % client.transport._host, args[1]) + + +def test_check_consistency_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.check_consistency( + bigtable_table_admin.CheckConsistencyRequest(), + name='name_value', + consistency_token='consistency_token_value', + ) + + +def test_check_consistency_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.SnapshotTableRequest, + dict, +]) +def test_snapshot_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.snapshot_table(request) + + # Establish that the response is the type that we expect. 
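+ # SnapshotTable is a long-running operation; the api_core Operation wrapper
+ # returned by the client exposes the raw Operation proto via its
+ # `operation` property, which is what the assertion below checks.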
+ assert response.operation.name == "operations/spam" + + +def test_snapshot_table_rest_required_fields(request_type=bigtable_table_admin.SnapshotTableRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request_init["cluster"] = "" + request_init["snapshot_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).snapshot_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + jsonified_request["cluster"] = 'cluster_value' + jsonified_request["snapshotId"] = 'snapshot_id_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).snapshot_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + assert "cluster" in jsonified_request + assert jsonified_request["cluster"] == 'cluster_value' + assert "snapshotId" in jsonified_request + assert jsonified_request["snapshotId"] == 'snapshot_id_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "post",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.snapshot_table(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_snapshot_table_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.snapshot_table._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name", "cluster", "snapshotId", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_snapshot_table_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_snapshot_table") as post, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = bigtable_table_admin.SnapshotTableRequest.pb(bigtable_table_admin.SnapshotTableRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+ request = bigtable_table_admin.SnapshotTableRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = operations_pb2.Operation()
+
+ client.snapshot_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_snapshot_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.SnapshotTableRequest):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.snapshot_table(request) + + +def test_snapshot_table_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + cluster='cluster_value', + snapshot_id='snapshot_id_value', + description='description_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.snapshot_table(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" % client.transport._host, args[1]) + + +def test_snapshot_table_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.snapshot_table( + bigtable_table_admin.SnapshotTableRequest(), + name='name_value', + cluster='cluster_value', + snapshot_id='snapshot_id_value', + description='description_value', + ) + + +def test_snapshot_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.GetSnapshotRequest, + dict, +]) +def test_get_snapshot_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
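+ # The canned Snapshot below round-trips one field of each kind through
+ # JSON: string, int64, and enum values.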
+ return_value = table.Snapshot( + name='name_value', + data_size_bytes=1594, + state=table.Snapshot.State.READY, + description='description_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_snapshot(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Snapshot) + assert response.name == 'name_value' + assert response.data_size_bytes == 1594 + assert response.state == table.Snapshot.State.READY + assert response.description == 'description_value' + + +def test_get_snapshot_rest_required_fields(request_type=bigtable_table_admin.GetSnapshotRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Snapshot() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "get",
+ 'query_params': pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = table.Snapshot.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.get_snapshot(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_get_snapshot_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.get_snapshot._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_snapshot_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_get_snapshot") as post, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = bigtable_table_admin.GetSnapshotRequest.pb(bigtable_table_admin.GetSnapshotRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = table.Snapshot.to_json(table.Snapshot())
+
+ request = bigtable_table_admin.GetSnapshotRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = table.Snapshot()
+
+ client.get_snapshot(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_snapshot_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.GetSnapshotRequest):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.get_snapshot(request) + + +def test_get_snapshot_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = table.Snapshot() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" % client.transport._host, args[1]) + + +def test_get_snapshot_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_snapshot( + bigtable_table_admin.GetSnapshotRequest(), + name='name_value', + ) + + +def test_get_snapshot_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.ListSnapshotsRequest, + dict, +]) +def test_list_snapshots_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSnapshotsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_snapshots(request) + + # Establish that the response is the type that we expect. 
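+ # The pager forwards unknown attributes to the wrapped ListSnapshotsResponse,
+ # which is why next_page_token is readable directly on the pager below.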
+ assert isinstance(response, pagers.ListSnapshotsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_snapshots_rest_required_fields(request_type=bigtable_table_admin.ListSnapshotsRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_snapshots._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_snapshots._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSnapshotsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "get",
+ 'query_params': pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ # Convert return value to protobuf type
+ return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value)
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.list_snapshots(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_list_snapshots_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.list_snapshots._get_unset_required_fields({})
+ assert set(unset_fields) == (set(("pageSize", "pageToken", )) & set(("parent", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_snapshots_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_list_snapshots") as post, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = bigtable_table_admin.ListSnapshotsRequest.pb(bigtable_table_admin.ListSnapshotsRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = bigtable_table_admin.ListSnapshotsResponse.to_json(bigtable_table_admin.ListSnapshotsResponse())
+
+ request = bigtable_table_admin.ListSnapshotsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = bigtable_table_admin.ListSnapshotsResponse()
+
+ client.list_snapshots(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_list_snapshots_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.ListSnapshotsRequest):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_snapshots(request) + + +def test_list_snapshots_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListSnapshotsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_snapshots(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" % client.transport._host, args[1]) + + +def test_list_snapshots_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_snapshots( + bigtable_table_admin.ListSnapshotsRequest(), + parent='parent_value', + ) + + +def test_list_snapshots_rest_pager(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
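+ # (The transcode mock below stays commented out: this pager test exercises
+ # real transcoding, since sample_request matches the method's http rule.)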
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + table.Snapshot(), + ], + next_page_token='abc', + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[], + next_page_token='def', + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + ], + next_page_token='ghi', + ), + bigtable_table_admin.ListSnapshotsResponse( + snapshots=[ + table.Snapshot(), + table.Snapshot(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + + pager = client.list_snapshots(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Snapshot) + for i in results) + + pages = list(client.list_snapshots(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.DeleteSnapshotRequest, + dict, +]) +def test_delete_snapshot_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_snapshot(request) + + # Establish that the response is the type that we expect. 
+ assert response is None + + +def test_delete_snapshot_rest_required_fields(request_type=bigtable_table_admin.DeleteSnapshotRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_snapshot._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
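+ # DeleteSnapshot maps to HTTP DELETE with no request body, so the
+ # transcode_result below deliberately omits the 'body' key.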
+ pb_request = request_type.pb(request)
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "delete",
+ 'query_params': pb_request,
+ }
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+ json_return_value = ''
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.delete_snapshot(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_delete_snapshot_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.delete_snapshot._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_snapshot_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_delete_snapshot") as pre:
+ pre.assert_not_called()
+ pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb(bigtable_table_admin.DeleteSnapshotRequest())
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+
+ request = bigtable_table_admin.DeleteSnapshotRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+
+ client.delete_snapshot(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+
+
+def test_delete_snapshot_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.DeleteSnapshotRequest):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ client.delete_snapshot(request)
+
+
+def test_delete_snapshot_rest_flattened():
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req:
+ # Designate an appropriate value for the returned response.
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_snapshot(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" % client.transport._host, args[1]) + + +def test_delete_snapshot_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_snapshot( + bigtable_table_admin.DeleteSnapshotRequest(), + name='name_value', + ) + + +def test_delete_snapshot_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.CreateBackupRequest, + dict, +]) +def test_create_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + request_init["backup"] = {'name': 'name_value', 'source_table': 'source_table_value', 'source_backup': 'source_backup_value', 'expire_time': {'seconds': 751, 'nanos': 543}, 'start_time': {}, 'end_time': {}, 'size_bytes': 1089, 'state': 1, 'encryption_info': {'encryption_type': 1, 'encryption_status': {'code': 411, 'message': 'message_value', 'details': [{'type_url': 'type.googleapis.com/google.protobuf.Duration', 'value': b'\x08\x0c\x10\xdb\x07'}]}, 'kms_key_version': 'kms_key_version_value'}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
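+ # (proto-plus message classes keep their field map under `.meta.fields`,
+ # while vanilla protobuf classes expose it via `.DESCRIPTOR.fields`; both
+ # cases are handled below.)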
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.create_backup(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_create_backup_rest_required_fields(request_type=bigtable_table_admin.CreateBackupRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + assert "backupId" not in jsonified_request + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == request_init["backup_id"] + + jsonified_request["parent"] = 'parent_value' + jsonified_request["backupId"] = 'backup_id_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).create_backup._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("backup_id", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == 'backup_id_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
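+ # backup_id travels as a required query parameter for CreateBackup, which is
+ # why expected_params below includes a ("backupId", "") entry alongside the
+ # standard $alt parameter.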
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.create_backup(request)
+
+            expected_params = [
+                (
+                    "backupId",
+                    "",
+                ),
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_create_backup_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.create_backup._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("backupId", )) & set(("parent", "backupId", "backup", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_create_backup_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+    )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_create_backup") as post, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_create_backup") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.CreateBackupRequest.pb(bigtable_table_admin.CreateBackupRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = bigtable_table_admin.CreateBackupRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.create_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_create_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CreateBackupRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
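+    # google.api_core maps an HTTP 400 response to core_exceptions.BadRequest.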
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.create_backup(request) + + +def test_create_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + backup_id='backup_id_value', + backup=table.Backup(name='name_value'), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.create_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" % client.transport._host, args[1]) + + +def test_create_backup_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.create_backup( + bigtable_table_admin.CreateBackupRequest(), + parent='parent_value', + backup_id='backup_id_value', + backup=table.Backup(name='name_value'), + ) + + +def test_create_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.GetBackupRequest, + dict, +]) +def test_get_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. 
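+        # The field values below are arbitrary; they only need to survive the JSON round trip for the assertions.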
+ return_value = table.Backup( + name='name_value', + source_table='source_table_value', + source_backup='source_backup_value', + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_backup(request) + + # Establish that the response is the type that we expect. + assert isinstance(response, table.Backup) + assert response.name == 'name_value' + assert response.source_table == 'source_table_value' + assert response.source_backup == 'source_backup_value' + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + + +def test_get_backup_rest_required_fields(request_type=bigtable_table_admin.GetBackupRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Backup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = table.Backup.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.get_backup(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_get_backup_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.get_backup._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_backup_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+    )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_get_backup") as post, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_get_backup") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.GetBackupRequest.pb(bigtable_table_admin.GetBackupRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = table.Backup.to_json(table.Backup())
+
+        request = bigtable_table_admin.GetBackupRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = table.Backup()
+
+        client.get_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_get_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.GetBackupRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.get_backup(request)
+
+
+def test_get_backup_rest_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup() + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" % client.transport._host, args[1]) + + +def test_get_backup_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_backup( + bigtable_table_admin.GetBackupRequest(), + name='name_value', + ) + + +def test_get_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.UpdateBackupRequest, + dict, +]) +def test_update_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'backup': {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'}} + request_init["backup"] = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4', 'source_table': 'source_table_value', 'source_backup': 'source_backup_value', 'expire_time': {'seconds': 751, 'nanos': 543}, 'start_time': {}, 'end_time': {}, 'size_bytes': 1089, 'state': 1, 'encryption_info': {'encryption_type': 1, 'encryption_status': {'code': 411, 'message': 'message_value', 'details': [{'type_url': 'type.googleapis.com/google.protobuf.Duration', 'value': b'\x08\x0c\x10\xdb\x07'}]}, 'kms_key_version': 'kms_key_version_value'}} + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
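+        # Proto-plus message classes expose their fields via meta.fields, while vanilla protobuf classes expose DESCRIPTOR.fields.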
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + {"field": field, "subfield": subfield, "is_repeated": is_repeated} + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup( + name='name_value', + source_table='source_table_value', + source_backup='source_backup_value', + size_bytes=1089, + state=table.Backup.State.CREATING, + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.update_backup(request) + + # Establish that the response is the type that we expect. 
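+    # Each assertion mirrors a value staged in the mocked response above.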
+ assert isinstance(response, table.Backup) + assert response.name == 'name_value' + assert response.source_table == 'source_table_value' + assert response.source_backup == 'source_backup_value' + assert response.size_bytes == 1089 + assert response.state == table.Backup.State.CREATING + + +def test_update_backup_rest_required_fields(request_type=bigtable_table_admin.UpdateBackupRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).update_backup._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("update_mask", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = table.Backup() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "patch",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = table.Backup.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.update_backup(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_update_backup_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.update_backup._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("updateMask", )) & set(("backup", "updateMask", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_update_backup_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+    )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_update_backup") as post, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_update_backup") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.UpdateBackupRequest.pb(bigtable_table_admin.UpdateBackupRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = table.Backup.to_json(table.Backup())
+
+        request = bigtable_table_admin.UpdateBackupRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = table.Backup()
+
+        client.update_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_update_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.UpdateBackupRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'backup': {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'}}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.update_backup(request) + + +def test_update_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = table.Backup() + + # get arguments that satisfy an http rule for this method + sample_request = {'backup': {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'}} + + # get truthy value for each flattened field + mock_args = dict( + backup=table.Backup(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.update_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" % client.transport._host, args[1]) + + +def test_update_backup_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.update_backup( + bigtable_table_admin.UpdateBackupRequest(), + backup=table.Backup(name='name_value'), + update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), + ) + + +def test_update_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.DeleteBackupRequest, + dict, +]) +def test_delete_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = None + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.delete_backup(request) + + # Establish that the response is the type that we expect. 
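+    # DeleteBackup returns google.protobuf.Empty, which the client surfaces as None.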
+ assert response is None + + +def test_delete_backup_rest_required_fields(request_type=bigtable_table_admin.DeleteBackupRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["name"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["name"] = 'name_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).delete_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "name" in jsonified_request + assert jsonified_request["name"] == 'name_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = None + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "delete",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = ''
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.delete_backup(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_delete_backup_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.delete_backup._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("name", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_delete_backup_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+    )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_delete_backup") as pre:
+        pre.assert_not_called()
+        pb_message = bigtable_table_admin.DeleteBackupRequest.pb(bigtable_table_admin.DeleteBackupRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+
+        request = bigtable_table_admin.DeleteBackupRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+
+        client.delete_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+
+
+def test_delete_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.DeleteBackupRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+    with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+        # Wrap the value into a proper Response obj
+        response_value = Response()
+        response_value.status_code = 400
+        response_value.request = Request()
+        req.return_value = response_value
+        client.delete_backup(request)
+
+
+def test_delete_backup_rest_flattened():
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="rest",
+    )
+
+    # Mock the http request call within the method and fake a response.
+    with mock.patch.object(type(client.transport._session), 'request') as req:
+        # Designate an appropriate value for the returned response.
+ return_value = None + + # get arguments that satisfy an http rule for this method + sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} + + # get truthy value for each flattened field + mock_args = dict( + name='name_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = '' + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.delete_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" % client.transport._host, args[1]) + + +def test_delete_backup_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.delete_backup( + bigtable_table_admin.DeleteBackupRequest(), + name='name_value', + ) + + +def test_delete_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.ListBackupsRequest, + dict, +]) +def test_list_backups_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse( + next_page_token='next_page_token_value', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.list_backups(request) + + # Establish that the response is the type that we expect. 
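+    # list_backups wraps the raw response in a pager; attribute access is delegated to the underlying response message.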
+ assert isinstance(response, pagers.ListBackupsPager) + assert response.next_page_token == 'next_page_token_value' + + +def test_list_backups_rest_required_fields(request_type=bigtable_table_admin.ListBackupsRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_backups._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).list_backups._get_unset_required_fields(jsonified_request) + # Check that path parameters and body parameters are not mixing in. + assert not set(unset_fields) - set(("filter", "order_by", "page_size", "page_token", )) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "get",
+                'query_params': pb_request,
+            }
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+
+            # Convert return value to protobuf type
+            return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value)
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.list_backups(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_list_backups_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.list_backups._get_unset_required_fields({})
+    assert set(unset_fields) == (set(("filter", "orderBy", "pageSize", "pageToken", )) & set(("parent", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_list_backups_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+    )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_list_backups") as post, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_list_backups") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.ListBackupsRequest.pb(bigtable_table_admin.ListBackupsRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = bigtable_table_admin.ListBackupsResponse.to_json(bigtable_table_admin.ListBackupsResponse())
+
+        request = bigtable_table_admin.ListBackupsRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = bigtable_table_admin.ListBackupsResponse()
+
+        client.list_backups(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_list_backups_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.ListBackupsRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.list_backups(request) + + +def test_list_backups_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = bigtable_table_admin.ListBackupsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.list_backups(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" % client.transport._host, args[1]) + + +def test_list_backups_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.list_backups( + bigtable_table_admin.ListBackupsRequest(), + parent='parent_value', + ) + + +def test_list_backups_rest_pager(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # TODO(kbandes): remove this mock unless there's a good reason for it. 
+ #with mock.patch.object(path_template, 'transcode') as transcode: + # Set the response as a series of pages + response = ( + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + table.Backup(), + ], + next_page_token='abc', + ), + bigtable_table_admin.ListBackupsResponse( + backups=[], + next_page_token='def', + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + ], + next_page_token='ghi', + ), + bigtable_table_admin.ListBackupsResponse( + backups=[ + table.Backup(), + table.Backup(), + ], + ), + ) + # Two responses for two calls + response = response + response + + # Wrap the values into proper Response objs + response = tuple(bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response) + return_values = tuple(Response() for i in response) + for return_val, response_val in zip(return_values, response): + return_val._content = response_val.encode('UTF-8') + return_val.status_code = 200 + req.side_effect = return_values + + sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + + pager = client.list_backups(request=sample_request) + + results = list(pager) + assert len(results) == 6 + assert all(isinstance(i, table.Backup) + for i in results) + + pages = list(client.list_backups(request=sample_request).pages) + for page_, token in zip(pages, ['abc','def','ghi', '']): + assert page_.raw_page.next_page_token == token + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.RestoreTableRequest, + dict, +]) +def test_restore_table_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.restore_table(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_restore_table_rest_required_fields(request_type=bigtable_table_admin.RestoreTableRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["table_id"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).restore_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + jsonified_request["tableId"] = 'table_id_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).restore_table._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "tableId" in jsonified_request + assert jsonified_request["tableId"] == 'table_id_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.restore_table(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_restore_table_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.restore_table._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("parent", "tableId", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_restore_table_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+    )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_restore_table") as post, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_restore_table") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.RestoreTableRequest.pb(bigtable_table_admin.RestoreTableRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = bigtable_table_admin.RestoreTableRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.restore_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_restore_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.RestoreTableRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.restore_table(request) + + +def test_restore_table_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + bigtable_table_admin.CopyBackupRequest, + dict, +]) +def test_copy_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.copy_backup(request) + + # Establish that the response is the type that we expect. + assert response.operation.name == "operations/spam" + + +def test_copy_backup_rest_required_fields(request_type=bigtable_table_admin.CopyBackupRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request_init["source_backup"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = 'parent_value' + jsonified_request["backupId"] = 'backup_id_value' + jsonified_request["sourceBackup"] = 'source_backup_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == 'parent_value' + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == 'backup_id_value' + assert "sourceBackup" in jsonified_request + assert jsonified_request["sourceBackup"] == 'source_backup_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + # Mock the http request call within the method and fake a response. 
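+    # Patching requests.Session.request also intercepts AuthorizedSession calls, which delegate to it via super().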
+    with mock.patch.object(Session, 'request') as req:
+        # We need to mock transcode() because providing default values
+        # for required fields will fail the real version if the http_options
+        # expect actual values for those fields.
+        with mock.patch.object(path_template, 'transcode') as transcode:
+            # A uri without fields and an empty body will force all the
+            # request fields to show up in the query_params.
+            pb_request = request_type.pb(request)
+            transcode_result = {
+                'uri': 'v1/sample_method',
+                'method': "post",
+                'query_params': pb_request,
+            }
+            transcode_result['body'] = pb_request
+            transcode.return_value = transcode_result
+
+            response_value = Response()
+            response_value.status_code = 200
+            json_return_value = json_format.MessageToJson(return_value)
+
+            response_value._content = json_return_value.encode('UTF-8')
+            req.return_value = response_value
+
+            response = client.copy_backup(request)
+
+            expected_params = [
+                ('$alt', 'json;enum-encoding=int')
+            ]
+            actual_params = req.call_args.kwargs['params']
+            assert expected_params == actual_params
+
+
+def test_copy_backup_rest_unset_required_fields():
+    transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+    unset_fields = transport.copy_backup._get_unset_required_fields({})
+    assert set(unset_fields) == (set(()) & set(("parent", "backupId", "sourceBackup", "expireTime", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_copy_backup_rest_interceptors(null_interceptor):
+    transport = transports.BigtableTableAdminRestTransport(
+        credentials=ga_credentials.AnonymousCredentials(),
+        interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+    )
+    client = BigtableTableAdminClient(transport=transport)
+    with mock.patch.object(type(client.transport._session), "request") as req, \
+        mock.patch.object(path_template, "transcode") as transcode, \
+        mock.patch.object(operation.Operation, "_set_result_from_operation"), \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_copy_backup") as post, \
+        mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_copy_backup") as pre:
+        pre.assert_not_called()
+        post.assert_not_called()
+        pb_message = bigtable_table_admin.CopyBackupRequest.pb(bigtable_table_admin.CopyBackupRequest())
+        transcode.return_value = {
+            "method": "post",
+            "uri": "my_uri",
+            "body": pb_message,
+            "query_params": pb_message,
+        }
+
+        req.return_value = Response()
+        req.return_value.status_code = 200
+        req.return_value.request = PreparedRequest()
+        req.return_value._content = json_format.MessageToJson(operations_pb2.Operation())
+
+        request = bigtable_table_admin.CopyBackupRequest()
+        metadata = [
+            ("key", "val"),
+            ("cephalopod", "squid"),
+        ]
+        pre.return_value = request, metadata
+        post.return_value = operations_pb2.Operation()
+
+        client.copy_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+        pre.assert_called_once()
+        post.assert_called_once()
+
+
+def test_copy_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CopyBackupRequest):
+    client = BigtableTableAdminClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport=transport,
+    )
+
+    # send a request that will satisfy transcoding
+    request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'}
+    request = request_type(**request_init)
+
+    # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.copy_backup(request) + + +def test_copy_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name='operations/spam') + + # get arguments that satisfy an http rule for this method + sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + parent='parent_value', + backup_id='backup_id_value', + source_backup='source_backup_value', + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.copy_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" % client.transport._host, args[1]) + + +def test_copy_backup_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent='parent_value', + backup_id='backup_id_value', + source_backup='source_backup_value', + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +def test_copy_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + iam_policy_pb2.GetIamPolicyRequest, + dict, +]) +def test_get_iam_policy_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b'etag_blob', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.get_iam_policy(request) + + # Establish that the response is the type that we expect. 
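+ # Note: the IAM methods return the raw policy_pb2.Policy proto rather than
+ # a GAPIC-wrapped message type.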
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b'etag_blob' + + +def test_get_iam_policy_rest_required_fields(request_type=iam_policy_pb2.GetIamPolicyRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = 'resource_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).get_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == 'resource_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
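+ # (iam_policy_pb2 requests are already plain protobuf messages, so no
+ # request_type.pb() conversion is needed here, unlike the GAPIC types above.)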
+ pb_request = request
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "post",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.get_iam_policy(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_get_iam_policy_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.get_iam_policy._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("resource", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_get_iam_policy_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy") as post, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = iam_policy_pb2.GetIamPolicyRequest()
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = json_format.MessageToJson(policy_pb2.Policy())
+
+ request = iam_policy_pb2.GetIamPolicyRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = policy_pb2.Policy()
+
+ client.get_iam_policy(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.GetIamPolicyRequest):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ client.get_iam_policy(request)
+
+
+def test_get_iam_policy_rest_flattened():
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
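+ # type(client.transport._session) resolves to the session's concrete class
+ # (google-auth's AuthorizedSession), so patching its request method also
+ # keeps this test off the network.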
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + resource='resource_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.get_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" % client.transport._host, args[1]) + + +def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.get_iam_policy( + iam_policy_pb2.GetIamPolicyRequest(), + resource='resource_value', + ) + + +def test_get_iam_policy_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + iam_policy_pb2.SetIamPolicyRequest, + dict, +]) +def test_set_iam_policy_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy( + version=774, + etag=b'etag_blob', + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.set_iam_policy(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, policy_pb2.Policy) + assert response.version == 774 + assert response.etag == b'etag_blob' + + +def test_set_iam_policy_rest_required_fields(request_type=iam_policy_pb2.SetIamPolicyRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = 'resource_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).set_iam_policy._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == 'resource_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "post",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.set_iam_policy(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_set_iam_policy_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.set_iam_policy._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("resource", "policy", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_set_iam_policy_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy") as post, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = iam_policy_pb2.SetIamPolicyRequest()
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = json_format.MessageToJson(policy_pb2.Policy())
+
+ request = iam_policy_pb2.SetIamPolicyRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = policy_pb2.Policy()
+
+ client.set_iam_policy(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.SetIamPolicyRequest):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest):
+ # Wrap the value into a proper Response obj
+ response_value = Response()
+ response_value.status_code = 400
+ response_value.request = Request()
+ req.return_value = response_value
+ client.set_iam_policy(request)
+
+
+def test_set_iam_policy_rest_flattened():
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport="rest",
+ )
+
+ # Mock the http request call within the method and fake a response.
+ with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = policy_pb2.Policy() + + # get arguments that satisfy an http rule for this method + sample_request = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + resource='resource_value', + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.set_iam_policy(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" % client.transport._host, args[1]) + + +def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.set_iam_policy( + iam_policy_pb2.SetIamPolicyRequest(), + resource='resource_value', + ) + + +def test_set_iam_policy_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +@pytest.mark.parametrize("request_type", [ + iam_policy_pb2.TestIamPermissionsRequest, + dict, +]) +def test_test_iam_permissions_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse( + permissions=['permissions_value'], + ) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + response = client.test_iam_permissions(request) + + # Establish that the response is the type that we expect. 
+ assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) + assert response.permissions == ['permissions_value'] + + +def test_test_iam_permissions_rest_required_fields(request_type=iam_policy_pb2.TestIamPermissionsRequest): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["resource"] = "" + request_init["permissions"] = "" + request = request_type(**request_init) + pb_request = request + jsonified_request = json.loads(json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False + )) + + # verify fields with default values are dropped + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["resource"] = 'resource_value' + jsonified_request["permissions"] = 'permissions_value' + + unset_fields = transport_class(credentials=ga_credentials.AnonymousCredentials()).test_iam_permissions._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "resource" in jsonified_request + assert jsonified_request["resource"] == 'resource_value' + assert "permissions" in jsonified_request + assert jsonified_request["permissions"] == 'permissions_value' + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, 'request') as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, 'transcode') as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request
+ transcode_result = {
+ 'uri': 'v1/sample_method',
+ 'method': "post",
+ 'query_params': pb_request,
+ }
+ transcode_result['body'] = pb_request
+ transcode.return_value = transcode_result
+
+ response_value = Response()
+ response_value.status_code = 200
+
+ json_return_value = json_format.MessageToJson(return_value)
+
+ response_value._content = json_return_value.encode('UTF-8')
+ req.return_value = response_value
+
+ response = client.test_iam_permissions(request)
+
+ expected_params = [
+ ('$alt', 'json;enum-encoding=int')
+ ]
+ actual_params = req.call_args.kwargs['params']
+ assert expected_params == actual_params
+
+
+def test_test_iam_permissions_rest_unset_required_fields():
+ transport = transports.BigtableTableAdminRestTransport(credentials=ga_credentials.AnonymousCredentials())
+
+ unset_fields = transport.test_iam_permissions._get_unset_required_fields({})
+ assert set(unset_fields) == (set(()) & set(("resource", "permissions", )))
+
+
+@pytest.mark.parametrize("null_interceptor", [True, False])
+def test_test_iam_permissions_rest_interceptors(null_interceptor):
+ transport = transports.BigtableTableAdminRestTransport(
+ credentials=ga_credentials.AnonymousCredentials(),
+ interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(),
+ )
+ client = BigtableTableAdminClient(transport=transport)
+ with mock.patch.object(type(client.transport._session), "request") as req, \
+ mock.patch.object(path_template, "transcode") as transcode, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions") as post, \
+ mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions") as pre:
+ pre.assert_not_called()
+ post.assert_not_called()
+ pb_message = iam_policy_pb2.TestIamPermissionsRequest()
+ transcode.return_value = {
+ "method": "post",
+ "uri": "my_uri",
+ "body": pb_message,
+ "query_params": pb_message,
+ }
+
+ req.return_value = Response()
+ req.return_value.status_code = 200
+ req.return_value.request = PreparedRequest()
+ req.return_value._content = json_format.MessageToJson(iam_policy_pb2.TestIamPermissionsResponse())
+
+ request = iam_policy_pb2.TestIamPermissionsRequest()
+ metadata = [
+ ("key", "val"),
+ ("cephalopod", "squid"),
+ ]
+ pre.return_value = request, metadata
+ post.return_value = iam_policy_pb2.TestIamPermissionsResponse()
+
+ client.test_iam_permissions(request, metadata=[("key", "val"), ("cephalopod", "squid"),])
+
+ pre.assert_called_once()
+ post.assert_called_once()
+
+
+def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.TestIamPermissionsRequest):
+ client = BigtableTableAdminClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport=transport,
+ )
+
+ # send a request that will satisfy transcoding
+ request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'}
+ request = request_type(**request_init)
+
+ # Mock the http request call within the method and fake a BadRequest error.
+ with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.test_iam_permissions(request) + + +def test_test_iam_permissions_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), 'request') as req: + # Designate an appropriate value for the returned response. + return_value = iam_policy_pb2.TestIamPermissionsResponse() + + # get arguments that satisfy an http rule for this method + sample_request = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} + + # get truthy value for each flattened field + mock_args = dict( + resource='resource_value', + permissions=['permissions_value'], + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode('UTF-8') + req.return_value = response_value + + client.test_iam_permissions(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate("%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" % client.transport._host, args[1]) + + +def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.test_iam_permissions( + iam_policy_pb2.TestIamPermissionsRequest(), + resource='resource_value', + permissions=['permissions_value'], + ) + + +def test_test_iam_permissions_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest' + ) + + +def test_credentials_transport_error(): + # It is an error to provide credentials and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # It is an error to provide a credentials file and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"credentials_file": "credentials.json"}, + transport=transport, + ) + + # It is an error to provide an api_key and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + options = client_options.ClientOptions() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options=options, + transport=transport, + ) + + # It is an error to provide an api_key and a credential. 
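+ # A bare mock stands in for ClientOptions here; a Mock auto-creates whatever
+ # attributes the constructor touches, and only api_key is set explicitly.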
+ options = mock.Mock() + options.api_key = "api_key" + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options=options, + credentials=ga_credentials.AnonymousCredentials() + ) + + # It is an error to provide scopes and a transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + with pytest.raises(ValueError): + client = BigtableTableAdminClient( + client_options={"scopes": ["1", "2"]}, + transport=transport, + ) + + +def test_transport_instance(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + client = BigtableTableAdminClient(transport=transport) + assert client.transport is transport + +def test_transport_get_channel(): + # A client may be instantiated with a custom transport instance. + transport = transports.BigtableTableAdminGrpcTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + + transport = transports.BigtableTableAdminGrpcAsyncIOTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + channel = transport.grpc_channel + assert channel + +@pytest.mark.parametrize("transport_class", [ + transports.BigtableTableAdminGrpcTransport, + transports.BigtableTableAdminGrpcAsyncIOTransport, + transports.BigtableTableAdminRestTransport, +]) +def test_transport_adc(transport_class): + # Test default credentials are used if not provided. + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport_class() + adc.assert_called_once() + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "rest", +]) +def test_transport_kind(transport_name): + transport = BigtableTableAdminClient.get_transport_class(transport_name)( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert transport.kind == transport_name + +def test_transport_grpc_default(): + # A client should use the gRPC transport by default. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + assert isinstance( + client.transport, + transports.BigtableTableAdminGrpcTransport, + ) + +def test_bigtable_table_admin_base_transport_error(): + # Passing both a credentials object and credentials_file should raise an error + with pytest.raises(core_exceptions.DuplicateCredentialArgs): + transport = transports.BigtableTableAdminTransport( + credentials=ga_credentials.AnonymousCredentials(), + credentials_file="credentials.json" + ) + + +def test_bigtable_table_admin_base_transport(): + # Instantiate the base transport. + with mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport.__init__') as Transport: + Transport.return_value = None + transport = transports.BigtableTableAdminTransport( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Every method on the transport should just blindly + # raise NotImplementedError. 
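+ # Every RPC defined on the transport must be listed here; a method missing
+ # from this tuple would silently escape the NotImplementedError check.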
+ methods = ( + 'create_table', + 'create_table_from_snapshot', + 'list_tables', + 'get_table', + 'update_table', + 'delete_table', + 'undelete_table', + 'modify_column_families', + 'drop_row_range', + 'generate_consistency_token', + 'check_consistency', + 'snapshot_table', + 'get_snapshot', + 'list_snapshots', + 'delete_snapshot', + 'create_backup', + 'get_backup', + 'update_backup', + 'delete_backup', + 'list_backups', + 'restore_table', + 'copy_backup', + 'get_iam_policy', + 'set_iam_policy', + 'test_iam_permissions', + ) + for method in methods: + with pytest.raises(NotImplementedError): + getattr(transport, method)(request=object()) + + with pytest.raises(NotImplementedError): + transport.close() + + # Additionally, the LRO client (a property) should + # also raise NotImplementedError + with pytest.raises(NotImplementedError): + transport.operations_client + + # Catch all for all remaining methods and properties + remainder = [ + 'kind', + ] + for r in remainder: + with pytest.raises(NotImplementedError): + getattr(transport, r)() + + +def test_bigtable_table_admin_base_transport_with_credentials_file(): + # Instantiate the base transport with a credentials file + with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableTableAdminTransport( + credentials_file="credentials.json", + quota_project_id="octopus", + ) + load_creds.assert_called_once_with("credentials.json", + scopes=None, + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + quota_project_id="octopus", + ) + + +def test_bigtable_table_admin_base_transport_with_adc(): + # Test the default credentials are used if credentials and credentials_file are None. + with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages') as Transport: + Transport.return_value = None + adc.return_value = (ga_credentials.AnonymousCredentials(), None) + transport = transports.BigtableTableAdminTransport() + adc.assert_called_once() + + +def test_bigtable_table_admin_auth_adc(): + # If no credentials are provided, we should use ADC credentials. 
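+ # (ADC: Application Default Credentials, resolved via google.auth.default().)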
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ BigtableTableAdminClient()
+ adc.assert_called_once_with(
+ scopes=None,
+ default_scopes=(
+ 'https://www.googleapis.com/auth/bigtable.admin',
+ 'https://www.googleapis.com/auth/bigtable.admin.table',
+ 'https://www.googleapis.com/auth/cloud-bigtable.admin',
+ 'https://www.googleapis.com/auth/cloud-bigtable.admin.table',
+ 'https://www.googleapis.com/auth/cloud-platform',
+ 'https://www.googleapis.com/auth/cloud-platform.read-only',
+ ),
+ quota_project_id=None,
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.BigtableTableAdminGrpcTransport,
+ transports.BigtableTableAdminGrpcAsyncIOTransport,
+ ],
+)
+def test_bigtable_table_admin_transport_auth_adc(transport_class):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+ adc.return_value = (ga_credentials.AnonymousCredentials(), None)
+ transport_class(quota_project_id="octopus", scopes=["1", "2"])
+ adc.assert_called_once_with(
+ scopes=["1", "2"],
+ default_scopes=(
+ 'https://www.googleapis.com/auth/bigtable.admin',
+ 'https://www.googleapis.com/auth/bigtable.admin.table',
+ 'https://www.googleapis.com/auth/cloud-bigtable.admin',
+ 'https://www.googleapis.com/auth/cloud-bigtable.admin.table',
+ 'https://www.googleapis.com/auth/cloud-platform',
+ 'https://www.googleapis.com/auth/cloud-platform.read-only',
+ ),
+ quota_project_id="octopus",
+ )
+
+
+@pytest.mark.parametrize(
+ "transport_class",
+ [
+ transports.BigtableTableAdminGrpcTransport,
+ transports.BigtableTableAdminGrpcAsyncIOTransport,
+ transports.BigtableTableAdminRestTransport,
+ ],
+)
+def test_bigtable_table_admin_transport_auth_gdch_credentials(transport_class):
+ host = 'https://language.com'
+ api_audience_tests = [None, 'https://language2.com']
+ api_audience_expect = [host, 'https://language2.com']
+ for t, e in zip(api_audience_tests, api_audience_expect):
+ with mock.patch.object(google.auth, 'default', autospec=True) as adc:
+ gdch_mock = mock.MagicMock()
+ type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock)
+ adc.return_value = (gdch_mock, None)
+ transport_class(host=host, api_audience=t)
+ gdch_mock.with_gdch_audience.assert_called_once_with(e)
+
+
+@pytest.mark.parametrize(
+ "transport_class,grpc_helpers",
+ [
+ (transports.BigtableTableAdminGrpcTransport, grpc_helpers),
+ (transports.BigtableTableAdminGrpcAsyncIOTransport, grpc_helpers_async)
+ ],
+)
+def test_bigtable_table_admin_transport_create_channel(transport_class, grpc_helpers):
+ # If credentials and host are not provided, the transport class should use
+ # ADC credentials.
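+ # The assertion below also pins the channel defaults: the service's default
+ # scopes, its default host, and unlimited gRPC message sizes.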
+ with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( + grpc_helpers, "create_channel", autospec=True + ) as create_channel: + creds = ga_credentials.AnonymousCredentials() + adc.return_value = (creds, None) + transport_class( + quota_project_id="octopus", + scopes=["1", "2"] + ) + + create_channel.assert_called_with( + "bigtableadmin.googleapis.com:443", + credentials=creds, + credentials_file=None, + quota_project_id="octopus", + default_scopes=( + 'https://www.googleapis.com/auth/bigtable.admin', + 'https://www.googleapis.com/auth/bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-bigtable.admin', + 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', + 'https://www.googleapis.com/auth/cloud-platform', + 'https://www.googleapis.com/auth/cloud-platform.read-only', +), + scopes=["1", "2"], + default_host="bigtableadmin.googleapis.com", + ssl_credentials=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + +@pytest.mark.parametrize("transport_class", [transports.BigtableTableAdminGrpcTransport, transports.BigtableTableAdminGrpcAsyncIOTransport]) +def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( + transport_class +): + cred = ga_credentials.AnonymousCredentials() + + # Check ssl_channel_credentials is used if provided. + with mock.patch.object(transport_class, "create_channel") as mock_create_channel: + mock_ssl_channel_creds = mock.Mock() + transport_class( + host="squid.clam.whelk", + credentials=cred, + ssl_channel_credentials=mock_ssl_channel_creds + ) + mock_create_channel.assert_called_once_with( + "squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_channel_creds, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + + # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls + # is used. + with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): + with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: + transport_class( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + expected_cert, expected_key = client_cert_source_callback() + mock_ssl_cred.assert_called_once_with( + certificate_chain=expected_cert, + private_key=expected_key + ) + +def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls(): + cred = ga_credentials.AnonymousCredentials() + with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: + transports.BigtableTableAdminRestTransport ( + credentials=cred, + client_cert_source_for_mtls=client_cert_source_callback + ) + mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) + + +def test_bigtable_table_admin_rest_lro_client(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='rest', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.AbstractOperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client + + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_bigtable_table_admin_host_no_port(transport_name): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='bigtableadmin.googleapis.com'), + transport=transport_name, + ) + assert client.transport._host == ( + 'bigtableadmin.googleapis.com:443' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://bigtableadmin.googleapis.com' + ) + +@pytest.mark.parametrize("transport_name", [ + "grpc", + "grpc_asyncio", + "rest", +]) +def test_bigtable_table_admin_host_with_port(transport_name): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + client_options=client_options.ClientOptions(api_endpoint='bigtableadmin.googleapis.com:8000'), + transport=transport_name, + ) + assert client.transport._host == ( + 'bigtableadmin.googleapis.com:8000' + if transport_name in ['grpc', 'grpc_asyncio'] + else 'https://bigtableadmin.googleapis.com:8000' + ) + +@pytest.mark.parametrize("transport_name", [ + "rest", +]) +def test_bigtable_table_admin_client_transport_session_collision(transport_name): + creds1 = ga_credentials.AnonymousCredentials() + creds2 = ga_credentials.AnonymousCredentials() + client1 = BigtableTableAdminClient( + credentials=creds1, + transport=transport_name, + ) + client2 = BigtableTableAdminClient( + credentials=creds2, + transport=transport_name, + ) + session1 = client1.transport.create_table._session + session2 = client2.transport.create_table._session + assert session1 != session2 + session1 = client1.transport.create_table_from_snapshot._session + session2 = client2.transport.create_table_from_snapshot._session + assert session1 != session2 + session1 = client1.transport.list_tables._session + session2 = client2.transport.list_tables._session + assert session1 != session2 + session1 = client1.transport.get_table._session + session2 = client2.transport.get_table._session + assert session1 != session2 + session1 = client1.transport.update_table._session + session2 = client2.transport.update_table._session + assert session1 != session2 + session1 = client1.transport.delete_table._session + session2 = client2.transport.delete_table._session + assert session1 != session2 + session1 = client1.transport.undelete_table._session + session2 = client2.transport.undelete_table._session + assert session1 != session2 + session1 = client1.transport.modify_column_families._session + session2 = client2.transport.modify_column_families._session + assert session1 != session2 + session1 = client1.transport.drop_row_range._session + session2 = client2.transport.drop_row_range._session + assert session1 != session2 + session1 = client1.transport.generate_consistency_token._session + session2 = client2.transport.generate_consistency_token._session + assert session1 != session2 + session1 = client1.transport.check_consistency._session + session2 = client2.transport.check_consistency._session + assert session1 != session2 + session1 = client1.transport.snapshot_table._session + session2 = client2.transport.snapshot_table._session + assert session1 != session2 + session1 = client1.transport.get_snapshot._session + session2 = client2.transport.get_snapshot._session + assert session1 != session2 + session1 = client1.transport.list_snapshots._session + session2 = client2.transport.list_snapshots._session + 
assert session1 != session2
+ session1 = client1.transport.delete_snapshot._session
+ session2 = client2.transport.delete_snapshot._session
+ assert session1 != session2
+ session1 = client1.transport.create_backup._session
+ session2 = client2.transport.create_backup._session
+ assert session1 != session2
+ session1 = client1.transport.get_backup._session
+ session2 = client2.transport.get_backup._session
+ assert session1 != session2
+ session1 = client1.transport.update_backup._session
+ session2 = client2.transport.update_backup._session
+ assert session1 != session2
+ session1 = client1.transport.delete_backup._session
+ session2 = client2.transport.delete_backup._session
+ assert session1 != session2
+ session1 = client1.transport.list_backups._session
+ session2 = client2.transport.list_backups._session
+ assert session1 != session2
+ session1 = client1.transport.restore_table._session
+ session2 = client2.transport.restore_table._session
+ assert session1 != session2
+ session1 = client1.transport.copy_backup._session
+ session2 = client2.transport.copy_backup._session
+ assert session1 != session2
+ session1 = client1.transport.get_iam_policy._session
+ session2 = client2.transport.get_iam_policy._session
+ assert session1 != session2
+ session1 = client1.transport.set_iam_policy._session
+ session2 = client2.transport.set_iam_policy._session
+ assert session1 != session2
+ session1 = client1.transport.test_iam_permissions._session
+ session2 = client2.transport.test_iam_permissions._session
+ assert session1 != session2
+
+
+def test_bigtable_table_admin_grpc_transport_channel():
+ channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.BigtableTableAdminGrpcTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+def test_bigtable_table_admin_grpc_asyncio_transport_channel():
+ channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials())
+
+ # Check that channel is used if provided.
+ transport = transports.BigtableTableAdminGrpcAsyncIOTransport(
+ host="squid.clam.whelk",
+ channel=channel,
+ )
+ assert transport.grpc_channel == channel
+ assert transport._host == "squid.clam.whelk:443"
+ assert transport._ssl_channel_credentials is None
+
+
+# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
+# removed from grpc/grpc_asyncio transport constructor.
+@pytest.mark.parametrize("transport_class", [transports.BigtableTableAdminGrpcTransport, transports.BigtableTableAdminGrpcAsyncIOTransport]) +def test_bigtable_table_admin_transport_channel_mtls_with_client_cert_source( + transport_class +): + with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_ssl_cred = mock.Mock() + grpc_ssl_channel_cred.return_value = mock_ssl_cred + + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + + cred = ga_credentials.AnonymousCredentials() + with pytest.warns(DeprecationWarning): + with mock.patch.object(google.auth, 'default') as adc: + adc.return_value = (cred, None) + transport = transport_class( + host="squid.clam.whelk", + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=client_cert_source_callback, + ) + adc.assert_called_once() + + grpc_ssl_channel_cred.assert_called_once_with( + certificate_chain=b"cert bytes", private_key=b"key bytes" + ) + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + assert transport._ssl_channel_credentials == mock_ssl_cred + + +# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are +# removed from grpc/grpc_asyncio transport constructor. +@pytest.mark.parametrize("transport_class", [transports.BigtableTableAdminGrpcTransport, transports.BigtableTableAdminGrpcAsyncIOTransport]) +def test_bigtable_table_admin_transport_channel_mtls_with_adc( + transport_class +): + mock_ssl_cred = mock.Mock() + with mock.patch.multiple( + "google.auth.transport.grpc.SslCredentials", + __init__=mock.Mock(return_value=None), + ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), + ): + with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: + mock_grpc_channel = mock.Mock() + grpc_create_channel.return_value = mock_grpc_channel + mock_cred = mock.Mock() + + with pytest.warns(DeprecationWarning): + transport = transport_class( + host="squid.clam.whelk", + credentials=mock_cred, + api_mtls_endpoint="mtls.squid.clam.whelk", + client_cert_source=None, + ) + + grpc_create_channel.assert_called_once_with( + "mtls.squid.clam.whelk:443", + credentials=mock_cred, + credentials_file=None, + scopes=None, + ssl_credentials=mock_ssl_cred, + quota_project_id=None, + options=[ + ("grpc.max_send_message_length", -1), + ("grpc.max_receive_message_length", -1), + ], + ) + assert transport.grpc_channel == mock_grpc_channel + + +def test_bigtable_table_admin_grpc_lro_client(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport='grpc', + ) + transport = client.transport + + # Ensure that we have a api-core operations client. + assert isinstance( + transport.operations_client, + operations_v1.OperationsClient, + ) + + # Ensure that subsequent calls to the property send the exact same object. 
+ assert transport.operations_client is transport.operations_client
+
+
+def test_bigtable_table_admin_grpc_lro_async_client():
+ client = BigtableTableAdminAsyncClient(
+ credentials=ga_credentials.AnonymousCredentials(),
+ transport='grpc_asyncio',
+ )
+ transport = client.transport
+
+ # Ensure that we have an api-core operations client.
+ assert isinstance(
+ transport.operations_client,
+ operations_v1.OperationsAsyncClient,
+ )
+
+ # Ensure that subsequent calls to the property send the exact same object.
+ assert transport.operations_client is transport.operations_client
+
+
+def test_backup_path():
+ project = "squid"
+ instance = "clam"
+ cluster = "whelk"
+ backup = "octopus"
+ expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format(project=project, instance=instance, cluster=cluster, backup=backup, )
+ actual = BigtableTableAdminClient.backup_path(project, instance, cluster, backup)
+ assert expected == actual
+
+
+def test_parse_backup_path():
+ expected = {
+ "project": "oyster",
+ "instance": "nudibranch",
+ "cluster": "cuttlefish",
+ "backup": "mussel",
+ }
+ path = BigtableTableAdminClient.backup_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BigtableTableAdminClient.parse_backup_path(path)
+ assert expected == actual
+
+def test_cluster_path():
+ project = "winkle"
+ instance = "nautilus"
+ cluster = "scallop"
+ expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format(project=project, instance=instance, cluster=cluster, )
+ actual = BigtableTableAdminClient.cluster_path(project, instance, cluster)
+ assert expected == actual
+
+
+def test_parse_cluster_path():
+ expected = {
+ "project": "abalone",
+ "instance": "squid",
+ "cluster": "clam",
+ }
+ path = BigtableTableAdminClient.cluster_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BigtableTableAdminClient.parse_cluster_path(path)
+ assert expected == actual
+
+def test_crypto_key_version_path():
+ project = "whelk"
+ location = "octopus"
+ key_ring = "oyster"
+ crypto_key = "nudibranch"
+ crypto_key_version = "cuttlefish"
+ expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, crypto_key_version=crypto_key_version, )
+ actual = BigtableTableAdminClient.crypto_key_version_path(project, location, key_ring, crypto_key, crypto_key_version)
+ assert expected == actual
+
+
+def test_parse_crypto_key_version_path():
+ expected = {
+ "project": "mussel",
+ "location": "winkle",
+ "key_ring": "nautilus",
+ "crypto_key": "scallop",
+ "crypto_key_version": "abalone",
+ }
+ path = BigtableTableAdminClient.crypto_key_version_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BigtableTableAdminClient.parse_crypto_key_version_path(path)
+ assert expected == actual
+
+def test_instance_path():
+ project = "squid"
+ instance = "clam"
+ expected = "projects/{project}/instances/{instance}".format(project=project, instance=instance, )
+ actual = BigtableTableAdminClient.instance_path(project, instance)
+ assert expected == actual
+
+
+def test_parse_instance_path():
+ expected = {
+ "project": "whelk",
+ "instance": "octopus",
+ }
+ path = BigtableTableAdminClient.instance_path(**expected)
+
+ # Check that the path construction is reversible.
+ actual = BigtableTableAdminClient.parse_instance_path(path) + assert expected == actual + +def test_snapshot_path(): + project = "oyster" + instance = "nudibranch" + cluster = "cuttlefish" + snapshot = "mussel" + expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format(project=project, instance=instance, cluster=cluster, snapshot=snapshot, ) + actual = BigtableTableAdminClient.snapshot_path(project, instance, cluster, snapshot) + assert expected == actual + + +def test_parse_snapshot_path(): + expected = { + "project": "winkle", + "instance": "nautilus", + "cluster": "scallop", + "snapshot": "abalone", + } + path = BigtableTableAdminClient.snapshot_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_snapshot_path(path) + assert expected == actual + +def test_table_path(): + project = "squid" + instance = "clam" + table = "whelk" + expected = "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) + actual = BigtableTableAdminClient.table_path(project, instance, table) + assert expected == actual + + +def test_parse_table_path(): + expected = { + "project": "octopus", + "instance": "oyster", + "table": "nudibranch", + } + path = BigtableTableAdminClient.table_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_table_path(path) + assert expected == actual + +def test_common_billing_account_path(): + billing_account = "cuttlefish" + expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) + actual = BigtableTableAdminClient.common_billing_account_path(billing_account) + assert expected == actual + + +def test_parse_common_billing_account_path(): + expected = { + "billing_account": "mussel", + } + path = BigtableTableAdminClient.common_billing_account_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_common_billing_account_path(path) + assert expected == actual + +def test_common_folder_path(): + folder = "winkle" + expected = "folders/{folder}".format(folder=folder, ) + actual = BigtableTableAdminClient.common_folder_path(folder) + assert expected == actual + + +def test_parse_common_folder_path(): + expected = { + "folder": "nautilus", + } + path = BigtableTableAdminClient.common_folder_path(**expected) + + # Check that the path construction is reversible. + actual = BigtableTableAdminClient.parse_common_folder_path(path) + assert expected == actual + +def test_common_organization_path(): + organization = "scallop" + expected = "organizations/{organization}".format(organization=organization, ) + actual = BigtableTableAdminClient.common_organization_path(organization) + assert expected == actual + + +def test_parse_common_organization_path(): + expected = { + "organization": "abalone", + } + path = BigtableTableAdminClient.common_organization_path(**expected) + + # Check that the path construction is reversible. 
+    actual = BigtableTableAdminClient.parse_common_organization_path(path)
+    assert expected == actual
+
+def test_common_project_path():
+    project = "squid"
+    expected = "projects/{project}".format(project=project, )
+    actual = BigtableTableAdminClient.common_project_path(project)
+    assert expected == actual
+
+
+def test_parse_common_project_path():
+    expected = {
+        "project": "clam",
+    }
+    path = BigtableTableAdminClient.common_project_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = BigtableTableAdminClient.parse_common_project_path(path)
+    assert expected == actual
+
+def test_common_location_path():
+    project = "whelk"
+    location = "octopus"
+    expected = "projects/{project}/locations/{location}".format(project=project, location=location, )
+    actual = BigtableTableAdminClient.common_location_path(project, location)
+    assert expected == actual
+
+
+def test_parse_common_location_path():
+    expected = {
+        "project": "oyster",
+        "location": "nudibranch",
+    }
+    path = BigtableTableAdminClient.common_location_path(**expected)
+
+    # Check that the path construction is reversible.
+    actual = BigtableTableAdminClient.parse_common_location_path(path)
+    assert expected == actual
+
+
+def test_client_with_default_client_info():
+    client_info = gapic_v1.client_info.ClientInfo()
+
+    with mock.patch.object(transports.BigtableTableAdminTransport, '_prep_wrapped_messages') as prep:
+        client = BigtableTableAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+    with mock.patch.object(transports.BigtableTableAdminTransport, '_prep_wrapped_messages') as prep:
+        transport_class = BigtableTableAdminClient.get_transport_class()
+        transport = transport_class(
+            credentials=ga_credentials.AnonymousCredentials(),
+            client_info=client_info,
+        )
+        prep.assert_called_once_with(client_info)
+
+@pytest.mark.asyncio
+async def test_transport_close_async():
+    client = BigtableTableAdminAsyncClient(
+        credentials=ga_credentials.AnonymousCredentials(),
+        transport="grpc_asyncio",
+    )
+    with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close:
+        async with client:
+            close.assert_not_called()
+        close.assert_called_once()
+
+
+def test_transport_close():
+    transports = {
+        "rest": "_session",
+        "grpc": "_grpc_channel",
+    }
+
+    for transport, close_name in transports.items():
+        client = BigtableTableAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close:
+            with client:
+                close.assert_not_called()
+            close.assert_called_once()
+
+def test_client_ctx():
+    transports = [
+        'rest',
+        'grpc',
+    ]
+    for transport in transports:
+        client = BigtableTableAdminClient(
+            credentials=ga_credentials.AnonymousCredentials(),
+            transport=transport
+        )
+        # Test client calls underlying transport.
+        with mock.patch.object(type(client.transport), "close") as close:
+            close.assert_not_called()
+            with client:
+                pass
+            close.assert_called()
+
+@pytest.mark.parametrize("client_class,transport_class", [
+    (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport),
+    (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport),
+])
+def test_api_key_credentials(client_class, transport_class):
+    with mock.patch.object(
+        google.auth._default, "get_api_key_credentials", create=True
+    ) as get_api_key_credentials:
+        mock_cred = mock.Mock()
+        get_api_key_credentials.return_value = mock_cred
+        options = client_options.ClientOptions()
+        options.api_key = "api_key"
+        with mock.patch.object(transport_class, "__init__") as patched:
+            patched.return_value = None
+            client = client_class(client_options=options)
+            patched.assert_called_once_with(
+                credentials=mock_cred,
+                credentials_file=None,
+                host=client.DEFAULT_ENDPOINT,
+                scopes=None,
+                client_cert_source_for_mtls=None,
+                quota_project_id=None,
+                client_info=transports.base.DEFAULT_CLIENT_INFO,
+                always_use_jwt_access=True,
+                api_audience=None,
+            )
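+
+# Usage sketch, not part of the generator output: the path helpers exercised
+# above are classmethods, so resource names can be built and parsed without
+# instantiating a client. The project/instance/table values are hypothetical:
+#
+#     name = BigtableTableAdminClient.table_path("my-project", "my-instance", "my-table")
+#     # -> "projects/my-project/instances/my-instance/tables/my-table"
+#     BigtableTableAdminClient.parse_table_path(name)
+#     # -> {"project": "my-project", "instance": "my-instance", "table": "my-table"}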