From 748310b020a8d55f0c2fbd3828948745b27e57ef Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 16 Mar 2023 08:01:31 -0400 Subject: [PATCH 01/52] chore(deps): Update nox in .kokoro/requirements.in [autoapprove] (#750) Source-Link: https://github.com/googleapis/synthtool/commit/92006bb3cdc84677aa93c7f5235424ec2b157146 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:2e247c7bf5154df7f98cce087a20ca7605e236340c7d6d1a14447e5c06791bd6 Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 2 +- .kokoro/requirements.in | 2 +- .kokoro/requirements.txt | 14 +++++--------- 3 files changed, 7 insertions(+), 11 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 5fc5daa31..b8edda51c 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:8555f0e37e6261408f792bfd6635102d2da5ad73f8f09bcb24f25e6afb5fac97 + digest: sha256:2e247c7bf5154df7f98cce087a20ca7605e236340c7d6d1a14447e5c06791bd6 diff --git a/.kokoro/requirements.in b/.kokoro/requirements.in index 882178ce6..ec867d9fd 100644 --- a/.kokoro/requirements.in +++ b/.kokoro/requirements.in @@ -5,6 +5,6 @@ typing-extensions twine wheel setuptools -nox +nox>=2022.11.21 # required to remove dependency on py charset-normalizer<3 click<8.1.0 diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index fa99c1290..66a2172a7 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -1,6 +1,6 @@ # -# This file is autogenerated by pip-compile with python 3.10 -# To update, run: +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: # # pip-compile --allow-unsafe --generate-hashes requirements.in # @@ -335,9 +335,9 @@ more-itertools==9.0.0 \ --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \ --hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab # via jaraco-classes -nox==2022.8.7 \ - --hash=sha256:1b894940551dc5c389f9271d197ca5d655d40bdc6ccf93ed6880e4042760a34b \ - --hash=sha256:96cca88779e08282a699d672258ec01eb7c792d35bbbf538c723172bce23212c +nox==2022.11.21 \ + --hash=sha256:0e41a990e290e274cb205a976c4c97ee3c5234441a8132c8c3fd9ea3c22149eb \ + --hash=sha256:e21c31de0711d1274ca585a2c5fde36b1aa962005ba8e9322bf5eeed16dcd684 # via -r requirements.in packaging==21.3 \ --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ @@ -380,10 +380,6 @@ protobuf==3.20.3 \ # gcp-docuploader # gcp-releasetool # google-api-core -py==1.11.0 \ - --hash=sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719 \ - --hash=sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378 - # via nox pyasn1==0.4.8 \ --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba From 45d3e4308c4f494228c2e6e18a36285c557cb0c3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 23 Mar 2023 09:44:02 -0400 Subject: [PATCH 02/52] docs: Fix formatting of request arg in docstring (#756) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: Fix formatting of request arg in docstring chore: Update gapic-generator-python to v1.9.1 PiperOrigin-RevId: 518604533 
Source-Link: https://github.com/googleapis/googleapis/commit/8a085aeddfa010af5bcef090827aac5255383d7e Source-Link: https://github.com/googleapis/googleapis-gen/commit/b2ab4b0a0ae2907e812c209198a74e0898afcb04 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjJhYjRiMGEwYWUyOTA3ZTgxMmMyMDkxOThhNzRlMDg5OGFmY2IwNCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 13 +++++------- .../bigtable_instance_admin/client.py | 13 +++++------- .../transports/rest.py | 18 ---------------- .../bigtable_table_admin/async_client.py | 14 +++++++------ .../services/bigtable_table_admin/client.py | 14 +++++++------ .../bigtable_table_admin/transports/rest.py | 21 ------------------- .../services/bigtable/async_client.py | 15 ++++++------- .../bigtable_v2/services/bigtable/client.py | 15 ++++++------- .../services/bigtable/transports/rest.py | 9 -------- 9 files changed, 42 insertions(+), 90 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index ddeaf979a..12811bcea 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1137,8 +1137,8 @@ async def update_cluster( Args: request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]): - The request object. A resizable group of nodes in a - particular cloud location, capable of serving all + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -1880,8 +1880,7 @@ async def get_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being requested. See the @@ -2030,8 +2029,7 @@ async def set_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being specified. See the @@ -2171,8 +2169,7 @@ async def test_iam_permissions( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the policy detail is being requested. 
See diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index fcb767a3d..ecc9bf1e2 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1400,8 +1400,8 @@ def update_cluster( Args: request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): - The request object. A resizable group of nodes in a - particular cloud location, capable of serving all + The request object. A resizable group of nodes in a particular cloud + location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. retry (google.api_core.retry.Retry): Designation of what errors, if any, @@ -2104,8 +2104,7 @@ def get_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being requested. See the @@ -2241,8 +2240,7 @@ def set_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being specified. See the @@ -2379,8 +2377,7 @@ def test_iam_permissions( Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the policy detail is being requested. See diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 5ae9600a9..e9b94cf78 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -874,7 +874,6 @@ def __call__( request (~.bigtable_instance_admin.CreateAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.CreateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -978,7 +977,6 @@ def __call__( request (~.bigtable_instance_admin.CreateClusterRequest): The request object. Request message for BigtableInstanceAdmin.CreateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1076,7 +1074,6 @@ def __call__( request (~.bigtable_instance_admin.CreateInstanceRequest): The request object. Request message for BigtableInstanceAdmin.CreateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1176,7 +1173,6 @@ def __call__( request (~.bigtable_instance_admin.DeleteAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.DeleteAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. @@ -1254,7 +1250,6 @@ def __call__( request (~.bigtable_instance_admin.DeleteClusterRequest): The request object. Request message for BigtableInstanceAdmin.DeleteCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1330,7 +1325,6 @@ def __call__( request (~.bigtable_instance_admin.DeleteInstanceRequest): The request object. Request message for BigtableInstanceAdmin.DeleteInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1406,7 +1400,6 @@ def __call__( request (~.bigtable_instance_admin.GetAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.GetAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1497,7 +1490,6 @@ def __call__( request (~.bigtable_instance_admin.GetClusterRequest): The request object. Request message for BigtableInstanceAdmin.GetCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1759,7 +1751,6 @@ def __call__( request (~.bigtable_instance_admin.GetInstanceRequest): The request object. Request message for BigtableInstanceAdmin.GetInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1853,7 +1844,6 @@ def __call__( request (~.bigtable_instance_admin.ListAppProfilesRequest): The request object. Request message for BigtableInstanceAdmin.ListAppProfiles. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1945,7 +1935,6 @@ def __call__( request (~.bigtable_instance_admin.ListClustersRequest): The request object. Request message for BigtableInstanceAdmin.ListClusters. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2035,7 +2024,6 @@ def __call__( request (~.bigtable_instance_admin.ListHotTabletsRequest): The request object. Request message for BigtableInstanceAdmin.ListHotTablets. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2127,7 +2115,6 @@ def __call__( request (~.bigtable_instance_admin.ListInstancesRequest): The request object. Request message for BigtableInstanceAdmin.ListInstances. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2219,7 +2206,6 @@ def __call__( request (~.bigtable_instance_admin.PartialUpdateClusterRequest): The request object. Request message for BigtableInstanceAdmin.PartialUpdateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2321,7 +2307,6 @@ def __call__( request (~.bigtable_instance_admin.PartialUpdateInstanceRequest): The request object. Request message for BigtableInstanceAdmin.PartialUpdateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -2692,7 +2677,6 @@ def __call__( request (~.bigtable_instance_admin.UpdateAppProfileRequest): The request object. Request message for BigtableInstanceAdmin.UpdateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2784,7 +2768,6 @@ def __call__( location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2885,7 +2868,6 @@ def __call__( served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index bc85e5c5d..91f059f8b 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -369,6 +369,7 @@ async def create_table_from_snapshot( request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1300,6 +1301,7 @@ async def snapshot_table( request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1437,6 +1439,7 @@ async def get_snapshot( request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1549,6 +1552,7 @@ async def list_snapshots( request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1672,6 +1676,7 @@ async def delete_snapshot( request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. 
This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -2290,8 +2295,7 @@ async def get_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being requested. See the @@ -2440,8 +2444,7 @@ async def set_iam_policy( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the policy is being specified. See the @@ -2581,8 +2584,7 @@ async def test_iam_permissions( Args: request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the policy detail is being requested. See diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index aa7eaa197..efceae90a 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -696,6 +696,7 @@ def create_table_from_snapshot( request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1594,6 +1595,7 @@ def snapshot_table( request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1731,6 +1733,7 @@ def get_snapshot( request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1833,6 +1836,7 @@ def list_snapshots( request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1946,6 +1950,7 @@ def delete_snapshot( request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -2545,8 +2550,7 @@ def get_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for `GetIamPolicy` - method. + The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being requested. See the @@ -2682,8 +2686,7 @@ def set_iam_policy( Args: request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for `SetIamPolicy` - method. + The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the policy is being specified. See the @@ -2820,8 +2823,7 @@ def test_iam_permissions( Args: request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for - `TestIamPermissions` method. + The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the policy detail is being requested. See diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 5c25ac556..4d5b2ed1c 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -938,7 +938,6 @@ def __call__( request (~.bigtable_table_admin.CheckConsistencyRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1041,7 +1040,6 @@ def __call__( request (~.bigtable_table_admin.CreateBackupRequest): The request object. The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1139,7 +1137,6 @@ def __call__( request (~.bigtable_table_admin.CreateTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1248,7 +1245,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1348,7 +1344,6 @@ def __call__( request (~.bigtable_table_admin.DeleteBackupRequest): The request object. The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -1431,7 +1426,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1507,7 +1501,6 @@ def __call__( request (~.bigtable_table_admin.DeleteTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1583,7 +1576,6 @@ def __call__( request (~.bigtable_table_admin.DropRowRangeRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1669,7 +1661,6 @@ def __call__( request (~.bigtable_table_admin.GenerateConsistencyTokenRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1772,7 +1763,6 @@ def __call__( request (~.bigtable_table_admin.GetBackupRequest): The request object. The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2042,7 +2032,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2142,7 +2131,6 @@ def __call__( request (~.bigtable_table_admin.GetTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2234,7 +2222,6 @@ def __call__( request (~.bigtable_table_admin.ListBackupsRequest): The request object. The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2331,7 +2318,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2428,7 +2414,6 @@ def __call__( request (~.bigtable_table_admin.ListTablesRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
@@ -2518,7 +2503,6 @@ def __call__( request (~.bigtable_table_admin.ModifyColumnFamiliesRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2621,7 +2605,6 @@ def __call__( request (~.bigtable_table_admin.RestoreTableRequest): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -2901,7 +2884,6 @@ def __call__( changed in backward-incompatible ways and is not recommended for production use. It is not subject to any SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3101,7 +3083,6 @@ def __call__( request (~.bigtable_table_admin.UndeleteTableRequest): The request object. Request message for [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3201,7 +3182,6 @@ def __call__( request (~.bigtable_table_admin.UpdateBackupRequest): The request object. The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -3300,7 +3280,6 @@ def __call__( request (~.bigtable_table_admin.UpdateTableRequest): The request object. The request for [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 3465569b3..1233e1288 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -807,8 +807,8 @@ async def ping_and_warm( Args: request (Optional[Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]]): - The request object. Request message for client - connection keep-alive and warming. + The request object. Request message for client connection + keep-alive and warming. name (:class:`str`): Required. The unique name of the instance to check permissions for as well as respond. Values are of the @@ -1027,8 +1027,9 @@ def generate_initial_change_stream_partitions( Args: request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (:class:`str`): Required. 
The unique name of the table from which to get @@ -1126,9 +1127,9 @@ def read_change_stream( Args: request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for - Bigtable.ReadChangeStream. + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (:class:`str`): Required. The unique name of the table from which to read a change stream. Values are of the form diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index 37ab65fe2..38618fa31 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1091,8 +1091,8 @@ def ping_and_warm( Args: request (Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]): - The request object. Request message for client - connection keep-alive and warming. + The request object. Request message for client connection + keep-alive and warming. name (str): Required. The unique name of the instance to check permissions for as well as respond. Values are of the @@ -1327,8 +1327,9 @@ def generate_initial_change_stream_partitions( Args: request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (str): Required. The unique name of the table from which to get @@ -1430,9 +1431,9 @@ def read_change_stream( Args: request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): - The request object. NOTE: This API is intended to be - used by Apache Beam BigtableIO. Request message for - Bigtable.ReadChangeStream. + The request object. NOTE: This API is intended to be used + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (str): Required. The unique name of the table from which to read a change stream. Values are of the form diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index ee9cb046f..4343fbb90 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -471,7 +471,6 @@ def __call__( request (~.bigtable.CheckAndMutateRowRequest): The request object. Request message for Bigtable.CheckAndMutateRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -575,7 +574,6 @@ def __call__( by Apache Beam BigtableIO. Request message for Bigtable.GenerateInitialChangeStreamPartitions. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -684,7 +682,6 @@ def __call__( request (~.bigtable.MutateRowRequest): The request object. Request message for Bigtable.MutateRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -783,7 +780,6 @@ def __call__( request (~.bigtable.MutateRowsRequest): The request object. 
Request message for BigtableService.MutateRows. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -881,7 +877,6 @@ def __call__( request (~.bigtable.PingAndWarmRequest): The request object. Request message for client connection keep-alive and warming. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -982,7 +977,6 @@ def __call__( The request object. NOTE: This API is intended to be used by Apache Beam BigtableIO. Request message for Bigtable.ReadChangeStream. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1083,7 +1077,6 @@ def __call__( request (~.bigtable.ReadModifyWriteRowRequest): The request object. Request message for Bigtable.ReadModifyWriteRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1184,7 +1177,6 @@ def __call__( request (~.bigtable.ReadRowsRequest): The request object. Request message for Bigtable.ReadRows. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -1280,7 +1272,6 @@ def __call__( request (~.bigtable.SampleRowKeysRequest): The request object. Request message for Bigtable.SampleRowKeys. - retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. From 505273b72bf83d8f92d0e0a92d62f22bce96cc3d Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Thu, 30 Mar 2023 15:07:35 -0700 Subject: [PATCH 03/52] fix: Pass the "retry" when calling read_rows. (#759) --- google/cloud/bigtable/row_data.py | 4 +++- tests/unit/test_row_data.py | 4 +++- tests/unit/test_table.py | 12 +++++++++--- 3 files changed, 15 insertions(+), 5 deletions(-) diff --git a/google/cloud/bigtable/row_data.py b/google/cloud/bigtable/row_data.py index a50fab1ee..e11379108 100644 --- a/google/cloud/bigtable/row_data.py +++ b/google/cloud/bigtable/row_data.py @@ -157,7 +157,9 @@ def __init__(self, read_method, request, retry=DEFAULT_RETRY_READ_ROWS): # Otherwise there is a risk of entering an infinite loop that resets # the timeout counter just before it being triggered. The increment # by 1 second here is customary but should not be much less than that. 
- self.response_iterator = read_method(request, timeout=self.retry._deadline + 1) + self.response_iterator = read_method( + request, timeout=self.retry._deadline + 1, retry=self.retry + ) self.rows = {} diff --git a/tests/unit/test_row_data.py b/tests/unit/test_row_data.py index 382a81ef1..fba69ceba 100644 --- a/tests/unit/test_row_data.py +++ b/tests/unit/test_row_data.py @@ -446,7 +446,9 @@ def test_partial_rows_data_constructor_with_retry(): client._data_stub.ReadRows, request, retry ) partial_rows_data.read_method.assert_called_once_with( - request, timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1 + request, + timeout=DEFAULT_RETRY_READ_ROWS.deadline + 1, + retry=DEFAULT_RETRY_READ_ROWS, ) assert partial_rows_data.request is request assert partial_rows_data.rows == {} diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index e66a8f0f6..3d7d2e8ee 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -643,6 +643,7 @@ def _table_read_row_helper(chunks, expected_result, app_profile_id=None): from google.cloud.bigtable import table as MUT from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_filters import RowSampleFilter + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -691,7 +692,9 @@ def mock_create_row_request(table_name, **kwargs): assert result == expected_result assert mock_created == expected_request - data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0) + data_api.read_rows.assert_called_once_with( + request_pb, timeout=61.0, retry=DEFAULT_RETRY_READ_ROWS + ) def test_table_read_row_miss_no__responses(): @@ -906,7 +909,7 @@ def mock_create_row_request(table_name, **kwargs): } assert mock_created == [(table.name, created_kwargs)] - data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0) + data_api.read_rows.assert_called_once_with(request_pb, timeout=61.0, retry=retry) def test_table_read_retry_rows(): @@ -1082,6 +1085,7 @@ def test_table_yield_rows_with_row_set(): from google.cloud.bigtable.row_set import RowSet from google.cloud.bigtable.row_set import RowRange from google.cloud.bigtable.table import _create_row_request + from google.cloud.bigtable.row_data import DEFAULT_RETRY_READ_ROWS credentials = _make_credentials() client = _make_client(project="project-id", credentials=credentials, admin=True) @@ -1149,7 +1153,9 @@ def test_table_yield_rows_with_row_set(): end_key=ROW_KEY_2, ) expected_request.rows.row_keys.append(ROW_KEY_3) - data_api.read_rows.assert_called_once_with(expected_request, timeout=61.0) + data_api.read_rows.assert_called_once_with( + expected_request, timeout=61.0, retry=DEFAULT_RETRY_READ_ROWS + ) def test_table_sample_row_keys(): From efe332c6932292b808e1cc02276d7a41c6711f87 Mon Sep 17 00:00:00 2001 From: Shweta Shetye-Sabharwal Date: Wed, 5 Apr 2023 18:34:04 +0000 Subject: [PATCH 04/52] chore(samples): Fixed a typo in the readme (#760) --- samples/hello/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/hello/README.md b/samples/hello/README.md index 0e1fc92f9..b3779fb43 100644 --- a/samples/hello/README.md +++ b/samples/hello/README.md @@ -17,7 +17,7 @@ Demonstrates how to connect to Cloud Bigtable and run some basic operations. Mor To run this sample: -1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. 
This sample may [require authetication][authentication] and you will need to [enable billing][enable_billing]. +1. If this is your first time working with GCP products, you will need to set up [the Cloud SDK][cloud_sdk] or utilize [Google Cloud Shell][gcloud_shell]. This sample may [require authentication][authentication] and you will need to [enable billing][enable_billing]. 1. Make a fork of this repo and clone the branch locally, then navigate to the sample directory you want to use. From 18b8f1878ff3d25e95d0880c1d59f34f4e962d6b Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Thu, 6 Apr 2023 17:14:36 +0100 Subject: [PATCH 05/52] chore(deps): update all dependencies (#746) --- samples/beam/requirements.txt | 2 +- samples/metricscaler/requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/samples/beam/requirements.txt b/samples/beam/requirements.txt index bcb270e72..8be9b98e0 100644 --- a/samples/beam/requirements.txt +++ b/samples/beam/requirements.txt @@ -1,3 +1,3 @@ -apache-beam==2.45.0 +apache-beam==2.46.0 google-cloud-bigtable==2.17.0 google-cloud-core==2.3.2 diff --git a/samples/metricscaler/requirements.txt b/samples/metricscaler/requirements.txt index e9647809f..02e08b4c8 100644 --- a/samples/metricscaler/requirements.txt +++ b/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ google-cloud-bigtable==2.17.0 -google-cloud-monitoring==2.14.1 +google-cloud-monitoring==2.14.2 From 128b4e1f3eea2dad903d84c8f2933b17a5f0d226 Mon Sep 17 00:00:00 2001 From: Billy Jacobson Date: Fri, 14 Apr 2023 11:08:12 -0400 Subject: [PATCH 06/52] docs: fix delete from column family example (#764) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Thank you for opening a Pull Request! Before submitting your PR, there are a few things you can do to make sure it goes smoothly: - [ ] Make sure to open an issue as a [bug/issue](https://togithub.com/googleapis/python-bigtable/issues/new/choose) before writing your code! 
That way we can discuss the change, evaluate designs, and agree on the general idea - [ ] Ensure the tests and linter pass - [ ] Code coverage does not decrease (if any source code was changed) - [ ] Appropriate docs were updated (if necessary) Fixes # 🦕 --- samples/snippets/deletes/deletes_snippets.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/samples/snippets/deletes/deletes_snippets.py b/samples/snippets/deletes/deletes_snippets.py index 4e89189db..8e78083bf 100644 --- a/samples/snippets/deletes/deletes_snippets.py +++ b/samples/snippets/deletes/deletes_snippets.py @@ -38,7 +38,7 @@ def delete_from_column_family(project_id, instance_id, table_id): table = instance.table(table_id) row = table.row("phone#4c410523#20190501") row.delete_cells( - column_family_id="cell_plan", columns=["data_plan_01gb", "data_plan_05gb"] + column_family_id="cell_plan", columns=row.ALL_COLUMNS ) row.commit() From b5d10d61fd48ded8655e07b8ecb768c539ee7bf4 Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 18 Apr 2023 19:00:48 +0200 Subject: [PATCH 07/52] chore(deps): update all dependencies (#763) --- samples/beam/requirements-test.txt | 2 +- samples/hello/requirements-test.txt | 2 +- samples/hello_happybase/requirements-test.txt | 2 +- samples/instanceadmin/requirements-test.txt | 2 +- samples/metricscaler/requirements-test.txt | 4 ++-- samples/quickstart/requirements-test.txt | 2 +- samples/quickstart_happybase/requirements-test.txt | 2 +- samples/snippets/deletes/requirements-test.txt | 2 +- samples/snippets/filters/requirements-test.txt | 2 +- samples/snippets/reads/requirements-test.txt | 2 +- samples/snippets/writes/requirements-test.txt | 2 +- samples/tableadmin/requirements-test.txt | 2 +- 12 files changed, 13 insertions(+), 13 deletions(-) diff --git a/samples/beam/requirements-test.txt b/samples/beam/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/beam/requirements-test.txt +++ b/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/hello/requirements-test.txt b/samples/hello/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/hello/requirements-test.txt +++ b/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/hello_happybase/requirements-test.txt b/samples/hello_happybase/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/hello_happybase/requirements-test.txt +++ b/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/instanceadmin/requirements-test.txt b/samples/instanceadmin/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/instanceadmin/requirements-test.txt +++ b/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/metricscaler/requirements-test.txt b/samples/metricscaler/requirements-test.txt index 82f315c7f..761227068 100644 --- a/samples/metricscaler/requirements-test.txt +++ b/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.2.2 -mock==5.0.1 +pytest==7.3.1 +mock==5.0.2 google-cloud-testutils diff --git a/samples/quickstart/requirements-test.txt b/samples/quickstart/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/quickstart/requirements-test.txt +++ b/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/quickstart_happybase/requirements-test.txt b/samples/quickstart_happybase/requirements-test.txt 
index c021c5b5b..c4d04a08d 100644 --- a/samples/quickstart_happybase/requirements-test.txt +++ b/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/snippets/deletes/requirements-test.txt b/samples/snippets/deletes/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/snippets/deletes/requirements-test.txt +++ b/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/snippets/filters/requirements-test.txt b/samples/snippets/filters/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/snippets/filters/requirements-test.txt +++ b/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/snippets/reads/requirements-test.txt b/samples/snippets/reads/requirements-test.txt index c021c5b5b..c4d04a08d 100644 --- a/samples/snippets/reads/requirements-test.txt +++ b/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/snippets/writes/requirements-test.txt b/samples/snippets/writes/requirements-test.txt index 8d6117f16..96aa71dab 100644 --- a/samples/snippets/writes/requirements-test.txt +++ b/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.2.2 +pytest==7.3.1 diff --git a/samples/tableadmin/requirements-test.txt b/samples/tableadmin/requirements-test.txt index d3ddc990f..ca1f33bd3 100644 --- a/samples/tableadmin/requirements-test.txt +++ b/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.2.2 +pytest==7.3.1 google-cloud-testutils==1.3.3 From 171fea6de57a47f92a2a56050f8bfe7518144df7 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 4 May 2023 10:42:05 -0700 Subject: [PATCH 08/52] feat: publish RateLimitInfo and FeatureFlag protos (#768) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: publish RateLimitInfo and FeatureFlag protos PiperOrigin-RevId: 527878708 Source-Link: https://github.com/googleapis/googleapis/commit/f129f486fa0f681456b99c5cc899bec889a3185c Source-Link: https://github.com/googleapis/googleapis-gen/commit/e02c87d9d0c9a77f2b17268a86f462b5a1d66bbd Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZTAyYzg3ZDlkMGM5YTc3ZjJiMTcyNjhhODZmNDYyYjVhMWQ2NmJiZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: Add feature flag proto to BUILD file PiperOrigin-RevId: 528468347 Source-Link: https://github.com/googleapis/googleapis/commit/38247e83e10ace50ec0022302e540e3b0d4be123 Source-Link: https://github.com/googleapis/googleapis-gen/commit/17e62a1ab5f22d7d537675a659157207e406e63d Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMTdlNjJhMWFiNWYyMmQ3ZDUzNzY3NWE2NTkxNTcyMDdlNDA2ZTYzZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/cloud/bigtable_v2/__init__.py | 4 ++ .../services/bigtable/async_client.py | 6 +- .../bigtable_v2/services/bigtable/client.py | 6 +- google/cloud/bigtable_v2/types/__init__.py | 6 ++ google/cloud/bigtable_v2/types/bigtable.py | 59 ++++++++++++++++++- .../cloud/bigtable_v2/types/feature_flags.py | 54 +++++++++++++++++ 6 files changed, 129 insertions(+), 6 deletions(-) create mode 100644 
google/cloud/bigtable_v2/types/feature_flags.py diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py index 342718dea..ee3bd8c0c 100644 --- a/google/cloud/bigtable_v2/__init__.py +++ b/google/cloud/bigtable_v2/__init__.py @@ -31,6 +31,7 @@ from .types.bigtable import MutateRowsResponse from .types.bigtable import PingAndWarmRequest from .types.bigtable import PingAndWarmResponse +from .types.bigtable import RateLimitInfo from .types.bigtable import ReadChangeStreamRequest from .types.bigtable import ReadChangeStreamResponse from .types.bigtable import ReadModifyWriteRowRequest @@ -54,6 +55,7 @@ from .types.data import StreamPartition from .types.data import TimestampRange from .types.data import ValueRange +from .types.feature_flags import FeatureFlags from .types.request_stats import FullReadStatsView from .types.request_stats import ReadIterationStats from .types.request_stats import RequestLatencyStats @@ -69,6 +71,7 @@ "Column", "ColumnRange", "Family", + "FeatureFlags", "FullReadStatsView", "GenerateInitialChangeStreamPartitionsRequest", "GenerateInitialChangeStreamPartitionsResponse", @@ -79,6 +82,7 @@ "Mutation", "PingAndWarmRequest", "PingAndWarmResponse", + "RateLimitInfo", "ReadChangeStreamRequest", "ReadChangeStreamResponse", "ReadIterationStats", diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 1233e1288..abd82d4d8 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -242,8 +242,10 @@ def read_rows( on the ``request`` instance; if ``request`` is provided, this should not be set. app_profile_id (:class:`str`): - This value specifies routing for replication. This API - only accepts the empty value of app_profile_id. + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index 38618fa31..a778aff3c 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -491,8 +491,10 @@ def read_rows( on the ``request`` instance; if ``request`` is provided, this should not be set. app_profile_id (str): - This value specifies routing for replication. This API - only accepts the empty value of app_profile_id. + This value specifies routing for + replication. If not specified, the + "default" application profile will be + used. 
This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/bigtable_v2/types/__init__.py b/google/cloud/bigtable_v2/types/__init__.py index bb2533e33..9f15efaf5 100644 --- a/google/cloud/bigtable_v2/types/__init__.py +++ b/google/cloud/bigtable_v2/types/__init__.py @@ -24,6 +24,7 @@ MutateRowsResponse, PingAndWarmRequest, PingAndWarmResponse, + RateLimitInfo, ReadChangeStreamRequest, ReadChangeStreamResponse, ReadModifyWriteRowRequest, @@ -50,6 +51,9 @@ TimestampRange, ValueRange, ) +from .feature_flags import ( + FeatureFlags, +) from .request_stats import ( FullReadStatsView, ReadIterationStats, @@ -71,6 +75,7 @@ "MutateRowsResponse", "PingAndWarmRequest", "PingAndWarmResponse", + "RateLimitInfo", "ReadChangeStreamRequest", "ReadChangeStreamResponse", "ReadModifyWriteRowRequest", @@ -94,6 +99,7 @@ "StreamPartition", "TimestampRange", "ValueRange", + "FeatureFlags", "FullReadStatsView", "ReadIterationStats", "RequestLatencyStats", diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py index ea97588c2..13f6ac0db 100644 --- a/google/cloud/bigtable_v2/types/bigtable.py +++ b/google/cloud/bigtable_v2/types/bigtable.py @@ -38,6 +38,7 @@ "MutateRowResponse", "MutateRowsRequest", "MutateRowsResponse", + "RateLimitInfo", "CheckAndMutateRowRequest", "CheckAndMutateRowResponse", "PingAndWarmRequest", @@ -61,8 +62,9 @@ class ReadRowsRequest(proto.Message): Values are of the form ``projects//instances//tables/``. app_profile_id (str): - This value specifies routing for replication. This API only - accepts the empty value of app_profile_id. + This value specifies routing for replication. + If not specified, the "default" application + profile will be used. rows (google.cloud.bigtable_v2.types.RowSet): The row keys and/or ranges to read sequentially. If not specified, reads from all @@ -469,10 +471,19 @@ class Entry(proto.Message): class MutateRowsResponse(proto.Message): r"""Response message for BigtableService.MutateRows. + .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields + Attributes: entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]): One or more results for Entries from the batch request. + rate_limit_info (google.cloud.bigtable_v2.types.RateLimitInfo): + Information about how client should limit the + rate (QPS). Primirily used by supported official + Cloud Bigtable clients. If unset, the rate limit + info is not provided by the server. + + This field is a member of `oneof`_ ``_rate_limit_info``. """ class Entry(proto.Message): @@ -506,6 +517,50 @@ class Entry(proto.Message): number=1, message=Entry, ) + rate_limit_info: "RateLimitInfo" = proto.Field( + proto.MESSAGE, + number=3, + optional=True, + message="RateLimitInfo", + ) + + +class RateLimitInfo(proto.Message): + r"""Information about how client should adjust the load to + Bigtable. + + Attributes: + period (google.protobuf.duration_pb2.Duration): + Time that clients should wait before + adjusting the target rate again. If clients + adjust rate too frequently, the impact of the + previous adjustment may not have been taken into + account and may over-throttle or under-throttle. + If clients adjust rate too slowly, they will not + be responsive to load changes on server side, + and may over-throttle or under-throttle. 
+ factor (float): + If it has been at least one ``period`` since the last load + adjustment, the client should multiply the current load by + this value to get the new target load. For example, if the + current load is 100 and ``factor`` is 0.8, the new target + load should be 80. After adjusting, the client should ignore + ``factor`` until another ``period`` has passed. + + The client can measure its load using any unit that's + comparable over time For example, QPS can be used as long as + each request involves a similar amount of work. + """ + + period: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + factor: float = proto.Field( + proto.DOUBLE, + number=2, + ) class CheckAndMutateRowRequest(proto.Message): diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py new file mode 100644 index 000000000..1b5f76e24 --- /dev/null +++ b/google/cloud/bigtable_v2/types/feature_flags.py @@ -0,0 +1,54 @@ +# -*- coding: utf-8 -*- +# Copyright 2022 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +from __future__ import annotations + +from typing import MutableMapping, MutableSequence + +import proto # type: ignore + + +__protobuf__ = proto.module( + package="google.bigtable.v2", + manifest={ + "FeatureFlags", + }, +) + + +class FeatureFlags(proto.Message): + r"""Feature flags supported by a client. This is intended to be sent as + part of request metadata to assure the server that certain behaviors + are safe to enable. This proto is meant to be serialized and + websafe-base64 encoded under the ``bigtable-features`` metadata key. + The value will remain constant for the lifetime of a client and due + to HTTP2's HPACK compression, the request overhead will be tiny. + This is an internal implementation detail and should not be used by + endusers directly. + + Attributes: + mutate_rows_rate_limit (bool): + Notify the server that the client enables + batch write flow control by requesting + RateLimitInfo from MutateRowsResponse. 
+ """ + + mutate_rows_rate_limit: bool = proto.Field( + proto.BOOL, + number=3, + ) + + +__all__ = tuple(sorted(__protobuf__.manifest)) From 7521a617c121ead96a21ca47959a53b2db2da090 Mon Sep 17 00:00:00 2001 From: Mariatta Wijaya Date: Wed, 10 May 2023 15:42:01 -0700 Subject: [PATCH 09/52] Feat: Threaded MutationsBatcher (#722) - Batch mutations in a thread to allow concurrent batching - Flush the batch every second - Add flow control to control inflight requests Co-authored-by: Mattie Fu --- docs/batcher.rst | 6 + docs/usage.rst | 1 + google/cloud/bigtable/batcher.py | 366 +++++++++++++++++++++++++------ google/cloud/bigtable/table.py | 6 +- tests/unit/test_batcher.py | 218 ++++++++++++------ 5 files changed, 469 insertions(+), 128 deletions(-) create mode 100644 docs/batcher.rst diff --git a/docs/batcher.rst b/docs/batcher.rst new file mode 100644 index 000000000..9ac335be1 --- /dev/null +++ b/docs/batcher.rst @@ -0,0 +1,6 @@ +Mutations Batching +~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.batcher + :members: + :show-inheritance: diff --git a/docs/usage.rst b/docs/usage.rst index 33bf7bb7f..73a32b039 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -17,6 +17,7 @@ Using the API row-data row-filters row-set + batcher In the hierarchy of API concepts diff --git a/google/cloud/bigtable/batcher.py b/google/cloud/bigtable/batcher.py index 3c23f4436..6b06ec060 100644 --- a/google/cloud/bigtable/batcher.py +++ b/google/cloud/bigtable/batcher.py @@ -13,104 +13,251 @@ # limitations under the License. """User friendly container for Google Cloud Bigtable MutationBatcher.""" +import threading +import queue +import concurrent.futures +import atexit -FLUSH_COUNT = 1000 -MAX_MUTATIONS = 100000 -MAX_ROW_BYTES = 5242880 # 5MB +from google.api_core.exceptions import from_grpc_status +from dataclasses import dataclass -class MaxMutationsError(ValueError): - """The number of mutations for bulk request is too big.""" +FLUSH_COUNT = 100 # after this many elements, send out the batch + +MAX_MUTATION_SIZE = 20 * 1024 * 1024 # 20MB # after this many bytes, send out the batch + +MAX_OUTSTANDING_BYTES = 100 * 1024 * 1024 # 100MB # max inflight byte size. + +MAX_OUTSTANDING_ELEMENTS = 100000 # max inflight mutations. + + +class MutationsBatchError(Exception): + """Error in the batch request""" + + def __init__(self, message, exc): + self.exc = exc + self.message = message + super().__init__(self.message) + + +class _MutationsBatchQueue(object): + """Private Threadsafe Queue to hold rows for batching.""" + + def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT): + """Specify the queue constraints""" + self._queue = queue.Queue() + self.total_mutation_count = 0 + self.total_size = 0 + self.max_mutation_bytes = max_mutation_bytes + self.flush_count = flush_count + + def get(self): + """Retrieve an item from the queue. Recalculate queue size.""" + row = self._queue.get() + mutation_size = row.get_mutations_size() + self.total_mutation_count -= len(row._get_mutations()) + self.total_size -= mutation_size + return row + + def put(self, item): + """Insert an item to the queue. 
Recalculate queue size.""" + + mutation_count = len(item._get_mutations()) + + self._queue.put(item) + + self.total_size += item.get_mutations_size() + self.total_mutation_count += mutation_count + + def full(self): + """Check if the queue is full.""" + if ( + self.total_mutation_count >= self.flush_count + or self.total_size >= self.max_mutation_bytes + ): + return True + return False + + def empty(self): + return self._queue.empty() + + +@dataclass +class _BatchInfo: + """Keeping track of size of a batch""" + + mutations_count: int = 0 + rows_count: int = 0 + mutations_size: int = 0 + + +class _FlowControl(object): + def __init__( + self, + max_mutations=MAX_OUTSTANDING_ELEMENTS, + max_mutation_bytes=MAX_OUTSTANDING_BYTES, + ): + """Control the inflight requests. Keep track of the mutations, row bytes and row counts. + As requests to backend are being made, adjust the number of mutations being processed. + + If threshold is reached, block the flow. + Reopen the flow as requests are finished. + """ + self.max_mutations = max_mutations + self.max_mutation_bytes = max_mutation_bytes + self.inflight_mutations = 0 + self.inflight_size = 0 + self.event = threading.Event() + self.event.set() + + def is_blocked(self): + """Returns True if: + + - inflight mutations >= max_mutations, or + - inflight bytes size >= max_mutation_bytes, or + """ + + return ( + self.inflight_mutations >= self.max_mutations + or self.inflight_size >= self.max_mutation_bytes + ) + + def control_flow(self, batch_info): + """ + Calculate the resources used by this batch + """ + + self.inflight_mutations += batch_info.mutations_count + self.inflight_size += batch_info.mutations_size + self.set_flow_control_status() + + def wait(self): + """ + Wait until flow control pushback has been released. + It awakens as soon as `event` is set. + """ + self.event.wait() + + def set_flow_control_status(self): + """Check the inflight mutations and size. + + If values exceed the allowed threshold, block the event. + """ + if self.is_blocked(): + self.event.clear() # sleep + else: + self.event.set() # awaken the threads + + def release(self, batch_info): + """ + Release the resources. + Decrement the row size to allow enqueued mutations to be run. + """ + self.inflight_mutations -= batch_info.mutations_count + self.inflight_size -= batch_info.mutations_size + self.set_flow_control_status() class MutationsBatcher(object): """A MutationsBatcher is used in batch cases where the number of mutations - is large or unknown. It will store DirectRows in memory until one of the - size limits is reached, or an explicit call to flush() is performed. When - a flush event occurs, the DirectRows in memory will be sent to Cloud + is large or unknown. It will store :class:`DirectRow` in memory until one of the + size limits is reached, or an explicit call to :func:`flush()` is performed. When + a flush event occurs, the :class:`DirectRow` in memory will be sent to Cloud Bigtable. Batching mutations is more efficient than sending individual request. This class is not suited for usage in systems where each mutation must be guaranteed to be sent, since calling mutate may only result in an - in-memory change. In a case of a system crash, any DirectRows remaining in + in-memory change. In a case of a system crash, any :class:`DirectRow` remaining in memory will not necessarily be sent to the service, even after the - completion of the mutate() method. + completion of the :func:`mutate()` method. 
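Since rows may be lost if the process dies before a flush, the context-manager form introduced by this patch is the safest entry point. A usage sketch, where the project, instance, and table names are placeholders::

    from google.cloud.bigtable import Client
    from google.cloud.bigtable.row import DirectRow

    client = Client(project="my-project", admin=True)  # placeholder identifiers
    table = client.instance("my-instance").table("my-table")

    # Exiting the ``with`` block calls close(), which flushes buffered rows,
    # so nothing mutated below is left sitting in memory.
    with table.mutations_batcher(flush_count=100) as batcher:
        for i in range(1000):
            row = DirectRow(row_key="row-{:04d}".format(i).encode())
            row.set_cell("cf1", b"col", b"value")
            batcher.mutate(row)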
- TODO: Performance would dramatically improve if this class had the - capability of asynchronous, parallel RPCs. + Note on thread safety: The same :class:`MutationBatcher` cannot be shared by multiple end-user threads. :type table: class :param table: class:`~google.cloud.bigtable.table.Table`. :type flush_count: int :param flush_count: (Optional) Max number of rows to flush. If it - reaches the max number of rows it calls finish_batch() to mutate the - current row batch. Default is FLUSH_COUNT (1000 rows). + reaches the max number of rows it calls finish_batch() to mutate the + current row batch. Default is FLUSH_COUNT (1000 rows). :type max_row_bytes: int :param max_row_bytes: (Optional) Max number of row mutations size to - flush. If it reaches the max number of row mutations size it calls - finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES - (5 MB). + flush. If it reaches the max number of row mutations size it calls + finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES + (5 MB). + + :type flush_interval: float + :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush. + Default is 1 second. """ - def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): - self.rows = [] - self.total_mutation_count = 0 - self.total_size = 0 + def __init__( + self, + table, + flush_count=FLUSH_COUNT, + max_row_bytes=MAX_MUTATION_SIZE, + flush_interval=1, + ): + self._rows = _MutationsBatchQueue( + max_mutation_bytes=max_row_bytes, flush_count=flush_count + ) self.table = table - self.flush_count = flush_count - self.max_row_bytes = max_row_bytes + self._executor = concurrent.futures.ThreadPoolExecutor() + atexit.register(self.close) + self._timer = threading.Timer(flush_interval, self.flush) + self._timer.start() + self.flow_control = _FlowControl( + max_mutations=MAX_OUTSTANDING_ELEMENTS, + max_mutation_bytes=MAX_OUTSTANDING_BYTES, + ) + self.futures_mapping = {} + self.exceptions = queue.Queue() + + @property + def flush_count(self): + return self._rows.flush_count + + @property + def max_row_bytes(self): + return self._rows.max_mutation_bytes + + def __enter__(self): + """Starting the MutationsBatcher as a context manager""" + return self def mutate(self, row): """Add a row to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. + limits, the batch is sent asynchronously. For example: - .. literalinclude:: snippets.py + .. literalinclude:: snippets_table.py :start-after: [START bigtable_api_batcher_mutate] :end-before: [END bigtable_api_batcher_mutate] :dedent: 4 :type row: class - :param row: class:`~google.cloud.bigtable.row.DirectRow`. + :param row: :class:`~google.cloud.bigtable.row.DirectRow`. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. 
- """ - mutation_count = len(row._get_mutations()) - if mutation_count > MAX_MUTATIONS: - raise MaxMutationsError( - "The row key {} exceeds the number of mutations {}.".format( - row.row_key, mutation_count - ) - ) - - if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS: - self.flush() - - self.rows.append(row) - self.total_mutation_count += mutation_count - self.total_size += row.get_mutations_size() + * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried + """ + self._rows.put(row) - if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count: - self.flush() + if self._rows.full(): + self._flush_async() def mutate_rows(self, rows): """Add multiple rows to the batch. If the current batch meets one of the size - limits, the batch is sent synchronously. + limits, the batch is sent asynchronously. For example: - .. literalinclude:: snippets.py + .. literalinclude:: snippets_table.py :start-after: [START bigtable_api_batcher_mutate_rows] :end-before: [END bigtable_api_batcher_mutate_rows] :dedent: 4 @@ -119,28 +266,119 @@ def mutate_rows(self, rows): :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any - row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't - match the number of rows that were retried - * :exc:`.batcher.MaxMutationsError` if any row exceeds max - mutations count. + * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried """ for row in rows: self.mutate(row) def flush(self): - """Sends the current. batch to Cloud Bigtable. + """Sends the current batch to Cloud Bigtable synchronously. For example: - .. literalinclude:: snippets.py + .. literalinclude:: snippets_table.py :start-after: [START bigtable_api_batcher_flush] :end-before: [END bigtable_api_batcher_flush] :dedent: 4 + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. + """ + rows_to_flush = [] + while not self._rows.empty(): + rows_to_flush.append(self._rows.get()) + response = self._flush_rows(rows_to_flush) + return response + + def _flush_async(self): + """Sends the current batch to Cloud Bigtable asynchronously. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
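From the caller's side, failures accumulated during these asynchronous flushes surface when ``flush()`` runs, and again from ``close()`` (shown further below). A sketch of handling them, reusing ``batcher`` from the earlier sketch::

    from google.cloud.bigtable.batcher import MutationsBatchError

    try:
        batcher.close()  # flushes remaining rows, raises if any mutation failed
    except MutationsBatchError as err:
        # ``err.exc`` holds the per-mutation exceptions that _flush_rows()
        # built with from_grpc_status() for every non-zero status code.
        for mutation_error in err.exc:
            print("mutation failed:", mutation_error)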
+ """ + + rows_to_flush = [] + mutations_count = 0 + mutations_size = 0 + rows_count = 0 + batch_info = _BatchInfo() + + while not self._rows.empty(): + row = self._rows.get() + mutations_count += len(row._get_mutations()) + mutations_size += row.get_mutations_size() + rows_count += 1 + rows_to_flush.append(row) + batch_info.mutations_count = mutations_count + batch_info.rows_count = rows_count + batch_info.mutations_size = mutations_size + + if ( + rows_count >= self.flush_count + or mutations_size >= self.max_row_bytes + or mutations_count >= self.flow_control.max_mutations + or mutations_size >= self.flow_control.max_mutation_bytes + or self._rows.empty() # submit when it reached the end of the queue + ): + # wait for resources to become available, before submitting any new batch + self.flow_control.wait() + # once unblocked, submit a batch + # event flag will be set by control_flow to block subsequent thread, but not blocking this one + self.flow_control.control_flow(batch_info) + future = self._executor.submit(self._flush_rows, rows_to_flush) + self.futures_mapping[future] = batch_info + future.add_done_callback(self._batch_completed_callback) + + # reset and start a new batch + rows_to_flush = [] + mutations_size = 0 + rows_count = 0 + mutations_count = 0 + batch_info = _BatchInfo() + + def _batch_completed_callback(self, future): + """Callback for when the mutation has finished. + + Raise exceptions if there's any. + Release the resources locked by the flow control and allow enqueued tasks to be run. + """ + + processed_rows = self.futures_mapping[future] + self.flow_control.release(processed_rows) + del self.futures_mapping[future] + + def _flush_rows(self, rows_to_flush): + """Mutate the specified rows. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. + """ + responses = [] + if len(rows_to_flush) > 0: + response = self.table.mutate_rows(rows_to_flush) + + for result in response: + if result.code != 0: + exc = from_grpc_status(result.code, result.message) + self.exceptions.put(exc) + responses.append(result) + + return responses + + def __exit__(self, exc_type, exc_value, exc_traceback): + """Clean up resources. Flush and shutdown the ThreadPoolExecutor.""" + self.close() + + def close(self): + """Clean up resources. Flush and shutdown the ThreadPoolExecutor. + Any errors will be raised. + + :raises: + * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
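The flow-control cycle used above (reserve, block, release) can be exercised in isolation. A self-contained sketch built on this patch's private helpers; the names are private, so the import is illustrative only::

    from google.cloud.bigtable.batcher import _BatchInfo, _FlowControl

    flow = _FlowControl(max_mutations=10, max_mutation_bytes=1024)
    batch = _BatchInfo(mutations_count=10, rows_count=5, mutations_size=512)

    flow.control_flow(batch)        # reserve this batch's resources
    assert flow.is_blocked()        # 10 inflight mutations hits the limit
    assert not flow.event.is_set()  # wait() would now block new submissions

    flow.release(batch)             # the RPC finished; return the resources
    assert not flow.is_blocked()
    assert flow.event.is_set()      # wait() returns immediately again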
""" - if len(self.rows) != 0: - self.table.mutate_rows(self.rows) - self.total_mutation_count = 0 - self.total_size = 0 - self.rows = [] + self.flush() + self._executor.shutdown(wait=True) + atexit.unregister(self.close) + if self.exceptions.qsize() > 0: + exc = list(self.exceptions.queue) + raise MutationsBatchError("Errors in batch mutations.", exc=exc) diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py index 8605992ba..e3191a729 100644 --- a/google/cloud/bigtable/table.py +++ b/google/cloud/bigtable/table.py @@ -32,7 +32,7 @@ from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher -from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES +from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_MUTATION_SIZE from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.row import AppendRow @@ -844,7 +844,9 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)} ) - def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): + def mutations_batcher( + self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE + ): """Factory to create a mutation batcher associated with this instance. For example: diff --git a/tests/unit/test_batcher.py b/tests/unit/test_batcher.py index 9ae6ed175..a238b2852 100644 --- a/tests/unit/test_batcher.py +++ b/tests/unit/test_batcher.py @@ -14,122 +14,118 @@ import mock +import time + import pytest from google.cloud.bigtable.row import DirectRow +from google.cloud.bigtable.batcher import ( + _FlowControl, + MutationsBatcher, + MutationsBatchError, +) TABLE_ID = "table-id" TABLE_NAME = "/tables/" + TABLE_ID -def _make_mutation_batcher(table, **kw): - from google.cloud.bigtable.batcher import MutationsBatcher - - return MutationsBatcher(table, **kw) - - def test_mutation_batcher_constructor(): table = _Table(TABLE_NAME) - - mutation_batcher = _make_mutation_batcher(table) - assert table is mutation_batcher.table + with MutationsBatcher(table) as mutation_batcher: + assert table is mutation_batcher.table def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - rows = [ - DirectRow(row_key=b"row_key"), - DirectRow(row_key=b"row_key_2"), - DirectRow(row_key=b"row_key_3"), - DirectRow(row_key=b"row_key_4"), - ] + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] - mutation_batcher.mutate_rows(rows) - mutation_batcher.flush() + mutation_batcher.mutate_rows(rows) assert table.mutation_calls == 1 def test_mutation_batcher_mutate(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) - - mutation_batcher.mutate(row) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) - mutation_batcher.flush() + mutation_batcher.mutate(row) assert 
table.mutation_calls == 1 def test_mutation_batcher_flush_w_no_rows(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) - mutation_batcher.flush() + with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher.flush() assert table.mutation_calls == 0 def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table, flush_count=3) + with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: - row_1 = DirectRow(row_key=b"row_key_1") - row_2 = DirectRow(row_key=b"row_key_2") - row_3 = DirectRow(row_key=b"row_key_3") + row_1 = DirectRow(row_key=b"row_key_1") + row_2 = DirectRow(row_key=b"row_key_2") + row_3 = DirectRow(row_key=b"row_key_3") - mutation_batcher.mutate(row_1) - mutation_batcher.mutate(row_2) - mutation_batcher.mutate(row_3) + mutation_batcher.mutate(row_1) + mutation_batcher.mutate(row_2) + mutation_batcher.mutate(row_3) assert table.mutation_calls == 1 -@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_with_max_mutations_failure(): - from google.cloud.bigtable.batcher import MaxMutationsError - +@mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", new=3) +def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) - with pytest.raises(MaxMutationsError): mutation_batcher.mutate(row) + assert table.mutation_calls == 1 + -@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_w_max_mutations(): +def test_mutation_batcher_mutate_w_max_row_bytes(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes - mutation_batcher.mutate(row) - mutation_batcher.flush() + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + row.set_cell("cf1", b"c3", max_value) + + mutation_batcher.mutate(row) assert table.mutation_calls == 1 -def test_mutation_batcher_mutate_w_max_row_bytes(): +def test_mutations_batcher_flushed_when_closed(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher( - table=table, max_row_bytes=3 * 1024 * 1024 - ) + mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -137,13 +133,108 @@ def test_mutation_batcher_mutate_w_max_row_bytes(): row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", max_value) row.set_cell("cf1", b"c2", max_value) - row.set_cell("cf1", b"c3", max_value) mutation_batcher.mutate(row) + assert table.mutation_calls == 0 + + mutation_batcher.close() + + assert table.mutation_calls == 1 + + +def test_mutations_batcher_context_manager_flushed_when_closed(): + table = 
_Table(TABLE_NAME) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + + mutation_batcher.mutate(row) assert table.mutation_calls == 1 +@mock.patch("google.cloud.bigtable.batcher.MutationsBatcher.flush") +def test_mutations_batcher_flush_interval(mocked_flush): + table = _Table(TABLE_NAME) + flush_interval = 0.5 + mutation_batcher = MutationsBatcher(table=table, flush_interval=flush_interval) + + assert mutation_batcher._timer.interval == flush_interval + mocked_flush.assert_not_called() + + time.sleep(0.4) + mocked_flush.assert_not_called() + + time.sleep(0.1) + mocked_flush.assert_called_once_with() + + mutation_batcher.close() + + +def test_mutations_batcher_response_with_error_codes(): + from google.rpc.status_pb2 import Status + + mocked_response = [Status(code=1), Status(code=5)] + + with mock.patch("tests.unit.test_batcher._Table") as mocked_table: + table = mocked_table.return_value + mutation_batcher = MutationsBatcher(table=table) + + row1 = DirectRow(row_key=b"row_key") + row2 = DirectRow(row_key=b"row_key") + table.mutate_rows.return_value = mocked_response + + mutation_batcher.mutate_rows([row1, row2]) + with pytest.raises(MutationsBatchError) as exc: + mutation_batcher.close() + assert exc.value.message == "Errors in batch mutations." + assert len(exc.value.exc) == 2 + + assert exc.value.exc[0].message == mocked_response[0].message + assert exc.value.exc[1].message == mocked_response[1].message + + +def test_flow_control_event_is_set_when_not_blocked(): + flow_control = _FlowControl() + + flow_control.set_flow_control_status() + assert flow_control.event.is_set() + + +def test_flow_control_event_is_not_set_when_blocked(): + flow_control = _FlowControl() + + flow_control.inflight_mutations = flow_control.max_mutations + flow_control.inflight_size = flow_control.max_mutation_bytes + + flow_control.set_flow_control_status() + assert not flow_control.event.is_set() + + +@mock.patch("concurrent.futures.ThreadPoolExecutor.submit") +def test_flush_async_batch_count(mocked_executor_submit): + table = _Table(TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table, flush_count=2) + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + for index in range(5): + row = DirectRow(row_key=f"row_key_{index}") + row.set_cell("cf1", b"c1", max_value) + mutation_batcher.mutate(row) + mutation_batcher._flush_async() + + # 3 batches submitted. 2 batches of 2 items, and the last one a single item batch. 
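+    # (5 single-mutation rows with flush_count=2 are partitioned into
+    # batches of 2, 2 and 1 rows, hence three submit() calls.)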
+ assert mocked_executor_submit.call_count == 3 + + class _Instance(object): def __init__(self, client=None): self._client = client @@ -156,5 +247,8 @@ def __init__(self, name, client=None): self.mutation_calls = 0 def mutate_rows(self, rows): + from google.rpc.status_pb2 import Status + self.mutation_calls += 1 - return rows + + return [Status(code=0) for _ in rows] From 8f9de8d58018d97f008dca31b357fdbd2edb5ab7 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 10 May 2023 16:13:17 -0700 Subject: [PATCH 10/52] chore(main): release 2.18.0 (#757) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .release-please-manifest.json | 2 +- CHANGELOG.md | 19 +++++++++++++++++++ google/cloud/bigtable/gapic_version.py | 2 +- google/cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 24 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 882f663e6..a627e662e 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.17.0" + ".": "2.18.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 78b4d1b29..2d7fe5141 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,25 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.18.0](https://github.com/googleapis/python-bigtable/compare/v2.17.0...v2.18.0) (2023-05-10) + + +### Features + +* Publish RateLimitInfo and FeatureFlag protos ([#768](https://github.com/googleapis/python-bigtable/issues/768)) ([171fea6](https://github.com/googleapis/python-bigtable/commit/171fea6de57a47f92a2a56050f8bfe7518144df7)) +* Threaded MutationsBatcher ([#722](https://github.com/googleapis/python-bigtable/issues/722)) ([7521a61](https://github.com/googleapis/python-bigtable/commit/7521a617c121ead96a21ca47959a53b2db2da090)) + + +### Bug Fixes + +* Pass the "retry" when calling read_rows. ([#759](https://github.com/googleapis/python-bigtable/issues/759)) ([505273b](https://github.com/googleapis/python-bigtable/commit/505273b72bf83d8f92d0e0a92d62f22bce96cc3d)) + + +### Documentation + +* Fix delete from column family example ([#764](https://github.com/googleapis/python-bigtable/issues/764)) ([128b4e1](https://github.com/googleapis/python-bigtable/commit/128b4e1f3eea2dad903d84c8f2933b17a5f0d226)) +* Fix formatting of request arg in docstring ([#756](https://github.com/googleapis/python-bigtable/issues/756)) ([45d3e43](https://github.com/googleapis/python-bigtable/commit/45d3e4308c4f494228c2e6e18a36285c557cb0c3)) + ## [2.17.0](https://github.com/googleapis/python-bigtable/compare/v2.16.0...v2.17.0) (2023-03-01) diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py index 8d4f4cfb6..f09943f6b 100644 --- a/google/cloud/bigtable/gapic_version.py +++ b/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
 #
-__version__ = "2.17.0"  # {x-release-please-version}
+__version__ = "2.18.0"  # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py
index 8d4f4cfb6..f09943f6b 100644
--- a/google/cloud/bigtable_admin/gapic_version.py
+++ b/google/cloud/bigtable_admin/gapic_version.py
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-__version__ = "2.17.0"  # {x-release-please-version}
+__version__ = "2.18.0"  # {x-release-please-version}
diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py
index 8d4f4cfb6..f09943f6b 100644
--- a/google/cloud/bigtable_admin_v2/gapic_version.py
+++ b/google/cloud/bigtable_admin_v2/gapic_version.py
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-__version__ = "2.17.0"  # {x-release-please-version}
+__version__ = "2.18.0"  # {x-release-please-version}
diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py
index 8d4f4cfb6..f09943f6b 100644
--- a/google/cloud/bigtable_v2/gapic_version.py
+++ b/google/cloud/bigtable_v2/gapic_version.py
@@ -13,4 +13,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 #
-__version__ = "2.17.0"  # {x-release-please-version}
+__version__ = "2.18.0"  # {x-release-please-version}

From a767cff95d990994f85f5fd05cc10f952087b49d Mon Sep 17 00:00:00 2001
From: Mattie Fu
Date: Thu, 11 May 2023 15:30:12 -0400
Subject: [PATCH 11/52] fix: Revert "Feat: Threaded MutationsBatcher" (#773)

Reverts googleapis/python-bigtable#722

That PR caused Apache Beam bigtableio.py failures (https://togithub.com/apache/beam/issues/26673) and is blocking the Beam release. It is unclear why it caused the failures, so we are reverting the change and cutting another release to unblock Beam while we investigate separately.
---
 docs/batcher.rst                 |   6 -
 docs/usage.rst                   |   1 -
 google/cloud/bigtable/batcher.py | 366 ++++++-------------------
 google/cloud/bigtable/table.py   |   6 +-
 tests/unit/test_batcher.py       | 218 ++++++------------
 5 files changed, 128 insertions(+), 469 deletions(-)
 delete mode 100644 docs/batcher.rst

diff --git a/docs/batcher.rst b/docs/batcher.rst
deleted file mode 100644
index 9ac335be1..000000000
--- a/docs/batcher.rst
+++ /dev/null
@@ -1,6 +0,0 @@
-Mutations Batching
-~~~~~~~~~~~~~~~~~~
-
-.. automodule:: google.cloud.bigtable.batcher
-   :members:
-   :show-inheritance:
diff --git a/docs/usage.rst b/docs/usage.rst
index 73a32b039..33bf7bb7f 100644
--- a/docs/usage.rst
+++ b/docs/usage.rst
@@ -17,7 +17,6 @@ Using the API
    row-data
    row-filters
    row-set
-   batcher


 In the hierarchy of API concepts
diff --git a/google/cloud/bigtable/batcher.py b/google/cloud/bigtable/batcher.py
index 6b06ec060..3c23f4436 100644
--- a/google/cloud/bigtable/batcher.py
+++ b/google/cloud/bigtable/batcher.py
@@ -13,251 +13,104 @@
 # limitations under the License.
"""User friendly container for Google Cloud Bigtable MutationBatcher.""" -import threading -import queue -import concurrent.futures -import atexit -from google.api_core.exceptions import from_grpc_status -from dataclasses import dataclass +FLUSH_COUNT = 1000 +MAX_MUTATIONS = 100000 +MAX_ROW_BYTES = 5242880 # 5MB -FLUSH_COUNT = 100 # after this many elements, send out the batch - -MAX_MUTATION_SIZE = 20 * 1024 * 1024 # 20MB # after this many bytes, send out the batch - -MAX_OUTSTANDING_BYTES = 100 * 1024 * 1024 # 100MB # max inflight byte size. - -MAX_OUTSTANDING_ELEMENTS = 100000 # max inflight mutations. - - -class MutationsBatchError(Exception): - """Error in the batch request""" - - def __init__(self, message, exc): - self.exc = exc - self.message = message - super().__init__(self.message) - - -class _MutationsBatchQueue(object): - """Private Threadsafe Queue to hold rows for batching.""" - - def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT): - """Specify the queue constraints""" - self._queue = queue.Queue() - self.total_mutation_count = 0 - self.total_size = 0 - self.max_mutation_bytes = max_mutation_bytes - self.flush_count = flush_count - - def get(self): - """Retrieve an item from the queue. Recalculate queue size.""" - row = self._queue.get() - mutation_size = row.get_mutations_size() - self.total_mutation_count -= len(row._get_mutations()) - self.total_size -= mutation_size - return row - - def put(self, item): - """Insert an item to the queue. Recalculate queue size.""" - - mutation_count = len(item._get_mutations()) - - self._queue.put(item) - - self.total_size += item.get_mutations_size() - self.total_mutation_count += mutation_count - - def full(self): - """Check if the queue is full.""" - if ( - self.total_mutation_count >= self.flush_count - or self.total_size >= self.max_mutation_bytes - ): - return True - return False - - def empty(self): - return self._queue.empty() - - -@dataclass -class _BatchInfo: - """Keeping track of size of a batch""" - - mutations_count: int = 0 - rows_count: int = 0 - mutations_size: int = 0 - - -class _FlowControl(object): - def __init__( - self, - max_mutations=MAX_OUTSTANDING_ELEMENTS, - max_mutation_bytes=MAX_OUTSTANDING_BYTES, - ): - """Control the inflight requests. Keep track of the mutations, row bytes and row counts. - As requests to backend are being made, adjust the number of mutations being processed. - - If threshold is reached, block the flow. - Reopen the flow as requests are finished. - """ - self.max_mutations = max_mutations - self.max_mutation_bytes = max_mutation_bytes - self.inflight_mutations = 0 - self.inflight_size = 0 - self.event = threading.Event() - self.event.set() - - def is_blocked(self): - """Returns True if: - - - inflight mutations >= max_mutations, or - - inflight bytes size >= max_mutation_bytes, or - """ - - return ( - self.inflight_mutations >= self.max_mutations - or self.inflight_size >= self.max_mutation_bytes - ) - - def control_flow(self, batch_info): - """ - Calculate the resources used by this batch - """ - - self.inflight_mutations += batch_info.mutations_count - self.inflight_size += batch_info.mutations_size - self.set_flow_control_status() - - def wait(self): - """ - Wait until flow control pushback has been released. - It awakens as soon as `event` is set. - """ - self.event.wait() - - def set_flow_control_status(self): - """Check the inflight mutations and size. - - If values exceed the allowed threshold, block the event. 
- """ - if self.is_blocked(): - self.event.clear() # sleep - else: - self.event.set() # awaken the threads - - def release(self, batch_info): - """ - Release the resources. - Decrement the row size to allow enqueued mutations to be run. - """ - self.inflight_mutations -= batch_info.mutations_count - self.inflight_size -= batch_info.mutations_size - self.set_flow_control_status() +class MaxMutationsError(ValueError): + """The number of mutations for bulk request is too big.""" class MutationsBatcher(object): """A MutationsBatcher is used in batch cases where the number of mutations - is large or unknown. It will store :class:`DirectRow` in memory until one of the - size limits is reached, or an explicit call to :func:`flush()` is performed. When - a flush event occurs, the :class:`DirectRow` in memory will be sent to Cloud + is large or unknown. It will store DirectRows in memory until one of the + size limits is reached, or an explicit call to flush() is performed. When + a flush event occurs, the DirectRows in memory will be sent to Cloud Bigtable. Batching mutations is more efficient than sending individual request. This class is not suited for usage in systems where each mutation must be guaranteed to be sent, since calling mutate may only result in an - in-memory change. In a case of a system crash, any :class:`DirectRow` remaining in + in-memory change. In a case of a system crash, any DirectRows remaining in memory will not necessarily be sent to the service, even after the - completion of the :func:`mutate()` method. + completion of the mutate() method. - Note on thread safety: The same :class:`MutationBatcher` cannot be shared by multiple end-user threads. + TODO: Performance would dramatically improve if this class had the + capability of asynchronous, parallel RPCs. :type table: class :param table: class:`~google.cloud.bigtable.table.Table`. :type flush_count: int :param flush_count: (Optional) Max number of rows to flush. If it - reaches the max number of rows it calls finish_batch() to mutate the - current row batch. Default is FLUSH_COUNT (1000 rows). + reaches the max number of rows it calls finish_batch() to mutate the + current row batch. Default is FLUSH_COUNT (1000 rows). :type max_row_bytes: int :param max_row_bytes: (Optional) Max number of row mutations size to - flush. If it reaches the max number of row mutations size it calls - finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES - (5 MB). - - :type flush_interval: float - :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush. - Default is 1 second. + flush. If it reaches the max number of row mutations size it calls + finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES + (5 MB). 
""" - def __init__( - self, - table, - flush_count=FLUSH_COUNT, - max_row_bytes=MAX_MUTATION_SIZE, - flush_interval=1, - ): - self._rows = _MutationsBatchQueue( - max_mutation_bytes=max_row_bytes, flush_count=flush_count - ) + def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): + self.rows = [] + self.total_mutation_count = 0 + self.total_size = 0 self.table = table - self._executor = concurrent.futures.ThreadPoolExecutor() - atexit.register(self.close) - self._timer = threading.Timer(flush_interval, self.flush) - self._timer.start() - self.flow_control = _FlowControl( - max_mutations=MAX_OUTSTANDING_ELEMENTS, - max_mutation_bytes=MAX_OUTSTANDING_BYTES, - ) - self.futures_mapping = {} - self.exceptions = queue.Queue() - - @property - def flush_count(self): - return self._rows.flush_count - - @property - def max_row_bytes(self): - return self._rows.max_mutation_bytes - - def __enter__(self): - """Starting the MutationsBatcher as a context manager""" - return self + self.flush_count = flush_count + self.max_row_bytes = max_row_bytes def mutate(self, row): """Add a row to the batch. If the current batch meets one of the size - limits, the batch is sent asynchronously. + limits, the batch is sent synchronously. For example: - .. literalinclude:: snippets_table.py + .. literalinclude:: snippets.py :start-after: [START bigtable_api_batcher_mutate] :end-before: [END bigtable_api_batcher_mutate] :dedent: 4 :type row: class - :param row: :class:`~google.cloud.bigtable.row.DirectRow`. + :param row: class:`~google.cloud.bigtable.row.DirectRow`. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried - """ - self._rows.put(row) + * :exc:`~.table._BigtableRetryableError` if any + row returned a transient error. + * :exc:`RuntimeError` if the number of responses doesn't + match the number of rows that were retried + * :exc:`.batcher.MaxMutationsError` if any row exceeds max + mutations count. + """ + mutation_count = len(row._get_mutations()) + if mutation_count > MAX_MUTATIONS: + raise MaxMutationsError( + "The row key {} exceeds the number of mutations {}.".format( + row.row_key, mutation_count + ) + ) + + if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS: + self.flush() + + self.rows.append(row) + self.total_mutation_count += mutation_count + self.total_size += row.get_mutations_size() - if self._rows.full(): - self._flush_async() + if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count: + self.flush() def mutate_rows(self, rows): """Add multiple rows to the batch. If the current batch meets one of the size - limits, the batch is sent asynchronously. + limits, the batch is sent synchronously. For example: - .. literalinclude:: snippets_table.py + .. literalinclude:: snippets.py :start-after: [START bigtable_api_batcher_mutate_rows] :end-before: [END bigtable_api_batcher_mutate_rows] :dedent: 4 @@ -266,119 +119,28 @@ def mutate_rows(self, rows): :param rows: list:[`~google.cloud.bigtable.row.DirectRow`]. :raises: One of the following: - * :exc:`~.table._BigtableRetryableError` if any row returned a transient error. - * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried + * :exc:`~.table._BigtableRetryableError` if any + row returned a transient error. 
+ * :exc:`RuntimeError` if the number of responses doesn't + match the number of rows that were retried + * :exc:`.batcher.MaxMutationsError` if any row exceeds max + mutations count. """ for row in rows: self.mutate(row) def flush(self): - """Sends the current batch to Cloud Bigtable synchronously. + """Sends the current. batch to Cloud Bigtable. For example: - .. literalinclude:: snippets_table.py + .. literalinclude:: snippets.py :start-after: [START bigtable_api_batcher_flush] :end-before: [END bigtable_api_batcher_flush] :dedent: 4 - :raises: - * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. - """ - rows_to_flush = [] - while not self._rows.empty(): - rows_to_flush.append(self._rows.get()) - response = self._flush_rows(rows_to_flush) - return response - - def _flush_async(self): - """Sends the current batch to Cloud Bigtable asynchronously. - - :raises: - * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. - """ - - rows_to_flush = [] - mutations_count = 0 - mutations_size = 0 - rows_count = 0 - batch_info = _BatchInfo() - - while not self._rows.empty(): - row = self._rows.get() - mutations_count += len(row._get_mutations()) - mutations_size += row.get_mutations_size() - rows_count += 1 - rows_to_flush.append(row) - batch_info.mutations_count = mutations_count - batch_info.rows_count = rows_count - batch_info.mutations_size = mutations_size - - if ( - rows_count >= self.flush_count - or mutations_size >= self.max_row_bytes - or mutations_count >= self.flow_control.max_mutations - or mutations_size >= self.flow_control.max_mutation_bytes - or self._rows.empty() # submit when it reached the end of the queue - ): - # wait for resources to become available, before submitting any new batch - self.flow_control.wait() - # once unblocked, submit a batch - # event flag will be set by control_flow to block subsequent thread, but not blocking this one - self.flow_control.control_flow(batch_info) - future = self._executor.submit(self._flush_rows, rows_to_flush) - self.futures_mapping[future] = batch_info - future.add_done_callback(self._batch_completed_callback) - - # reset and start a new batch - rows_to_flush = [] - mutations_size = 0 - rows_count = 0 - mutations_count = 0 - batch_info = _BatchInfo() - - def _batch_completed_callback(self, future): - """Callback for when the mutation has finished. - - Raise exceptions if there's any. - Release the resources locked by the flow control and allow enqueued tasks to be run. - """ - - processed_rows = self.futures_mapping[future] - self.flow_control.release(processed_rows) - del self.futures_mapping[future] - - def _flush_rows(self, rows_to_flush): - """Mutate the specified rows. - - :raises: - * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. - """ - responses = [] - if len(rows_to_flush) > 0: - response = self.table.mutate_rows(rows_to_flush) - - for result in response: - if result.code != 0: - exc = from_grpc_status(result.code, result.message) - self.exceptions.put(exc) - responses.append(result) - - return responses - - def __exit__(self, exc_type, exc_value, exc_traceback): - """Clean up resources. Flush and shutdown the ThreadPoolExecutor.""" - self.close() - - def close(self): - """Clean up resources. Flush and shutdown the ThreadPoolExecutor. - Any errors will be raised. - - :raises: - * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
""" - self.flush() - self._executor.shutdown(wait=True) - atexit.unregister(self.close) - if self.exceptions.qsize() > 0: - exc = list(self.exceptions.queue) - raise MutationsBatchError("Errors in batch mutations.", exc=exc) + if len(self.rows) != 0: + self.table.mutate_rows(self.rows) + self.total_mutation_count = 0 + self.total_size = 0 + self.rows = [] diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py index e3191a729..8605992ba 100644 --- a/google/cloud/bigtable/table.py +++ b/google/cloud/bigtable/table.py @@ -32,7 +32,7 @@ from google.cloud.bigtable.column_family import _gc_rule_from_pb from google.cloud.bigtable.column_family import ColumnFamily from google.cloud.bigtable.batcher import MutationsBatcher -from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_MUTATION_SIZE +from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES from google.cloud.bigtable.encryption_info import EncryptionInfo from google.cloud.bigtable.policy import Policy from google.cloud.bigtable.row import AppendRow @@ -844,9 +844,7 @@ def drop_by_prefix(self, row_key_prefix, timeout=None): request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)} ) - def mutations_batcher( - self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE - ): + def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES): """Factory to create a mutation batcher associated with this instance. For example: diff --git a/tests/unit/test_batcher.py b/tests/unit/test_batcher.py index a238b2852..9ae6ed175 100644 --- a/tests/unit/test_batcher.py +++ b/tests/unit/test_batcher.py @@ -14,118 +14,122 @@ import mock -import time - import pytest from google.cloud.bigtable.row import DirectRow -from google.cloud.bigtable.batcher import ( - _FlowControl, - MutationsBatcher, - MutationsBatchError, -) TABLE_ID = "table-id" TABLE_NAME = "/tables/" + TABLE_ID +def _make_mutation_batcher(table, **kw): + from google.cloud.bigtable.batcher import MutationsBatcher + + return MutationsBatcher(table, **kw) + + def test_mutation_batcher_constructor(): table = _Table(TABLE_NAME) - with MutationsBatcher(table) as mutation_batcher: - assert table is mutation_batcher.table + + mutation_batcher = _make_mutation_batcher(table) + assert table is mutation_batcher.table def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) - with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table) - rows = [ - DirectRow(row_key=b"row_key"), - DirectRow(row_key=b"row_key_2"), - DirectRow(row_key=b"row_key_3"), - DirectRow(row_key=b"row_key_4"), - ] + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] - mutation_batcher.mutate_rows(rows) + mutation_batcher.mutate_rows(rows) + mutation_batcher.flush() assert table.mutation_calls == 1 def test_mutation_batcher_mutate(): table = _Table(TABLE_NAME) - with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table) - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) - mutation_batcher.mutate(row) + mutation_batcher.mutate(row) + + mutation_batcher.flush() assert 
table.mutation_calls == 1 def test_mutation_batcher_flush_w_no_rows(): table = _Table(TABLE_NAME) - with MutationsBatcher(table=table) as mutation_batcher: - mutation_batcher.flush() + mutation_batcher = _make_mutation_batcher(table=table) + mutation_batcher.flush() assert table.mutation_calls == 0 def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) - with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table, flush_count=3) - row_1 = DirectRow(row_key=b"row_key_1") - row_2 = DirectRow(row_key=b"row_key_2") - row_3 = DirectRow(row_key=b"row_key_3") + row_1 = DirectRow(row_key=b"row_key_1") + row_2 = DirectRow(row_key=b"row_key_2") + row_3 = DirectRow(row_key=b"row_key_3") - mutation_batcher.mutate(row_1) - mutation_batcher.mutate(row_2) - mutation_batcher.mutate(row_3) + mutation_batcher.mutate(row_1) + mutation_batcher.mutate(row_2) + mutation_batcher.mutate(row_3) assert table.mutation_calls == 1 -@mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", new=3) -def test_mutation_batcher_mutate_w_max_mutations(): +@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) +def test_mutation_batcher_mutate_with_max_mutations_failure(): + from google.cloud.bigtable.batcher import MaxMutationsError + table = _Table(TABLE_NAME) - with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table) - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) + with pytest.raises(MaxMutationsError): mutation_batcher.mutate(row) - assert table.mutation_calls == 1 - -def test_mutation_batcher_mutate_w_max_row_bytes(): +@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) +def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) - with MutationsBatcher( - table=table, max_row_bytes=3 * 1024 * 1024 - ) as mutation_batcher: + mutation_batcher = _make_mutation_batcher(table=table) - number_of_bytes = 1 * 1024 * 1024 - max_value = b"1" * number_of_bytes - - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", max_value) - row.set_cell("cf1", b"c2", max_value) - row.set_cell("cf1", b"c3", max_value) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) - mutation_batcher.mutate(row) + mutation_batcher.mutate(row) + mutation_batcher.flush() assert table.mutation_calls == 1 -def test_mutations_batcher_flushed_when_closed(): +def test_mutation_batcher_mutate_w_max_row_bytes(): table = _Table(TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) + mutation_batcher = _make_mutation_batcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -133,108 +137,13 @@ def test_mutations_batcher_flushed_when_closed(): row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", max_value) row.set_cell("cf1", b"c2", max_value) + row.set_cell("cf1", b"c3", max_value) mutation_batcher.mutate(row) - assert table.mutation_calls == 0 - - mutation_batcher.close() - - assert table.mutation_calls == 1 - - -def test_mutations_batcher_context_manager_flushed_when_closed(): - table = 
_Table(TABLE_NAME) - with MutationsBatcher( - table=table, max_row_bytes=3 * 1024 * 1024 - ) as mutation_batcher: - - number_of_bytes = 1 * 1024 * 1024 - max_value = b"1" * number_of_bytes - - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", max_value) - row.set_cell("cf1", b"c2", max_value) - - mutation_batcher.mutate(row) assert table.mutation_calls == 1 -@mock.patch("google.cloud.bigtable.batcher.MutationsBatcher.flush") -def test_mutations_batcher_flush_interval(mocked_flush): - table = _Table(TABLE_NAME) - flush_interval = 0.5 - mutation_batcher = MutationsBatcher(table=table, flush_interval=flush_interval) - - assert mutation_batcher._timer.interval == flush_interval - mocked_flush.assert_not_called() - - time.sleep(0.4) - mocked_flush.assert_not_called() - - time.sleep(0.1) - mocked_flush.assert_called_once_with() - - mutation_batcher.close() - - -def test_mutations_batcher_response_with_error_codes(): - from google.rpc.status_pb2 import Status - - mocked_response = [Status(code=1), Status(code=5)] - - with mock.patch("tests.unit.test_batcher._Table") as mocked_table: - table = mocked_table.return_value - mutation_batcher = MutationsBatcher(table=table) - - row1 = DirectRow(row_key=b"row_key") - row2 = DirectRow(row_key=b"row_key") - table.mutate_rows.return_value = mocked_response - - mutation_batcher.mutate_rows([row1, row2]) - with pytest.raises(MutationsBatchError) as exc: - mutation_batcher.close() - assert exc.value.message == "Errors in batch mutations." - assert len(exc.value.exc) == 2 - - assert exc.value.exc[0].message == mocked_response[0].message - assert exc.value.exc[1].message == mocked_response[1].message - - -def test_flow_control_event_is_set_when_not_blocked(): - flow_control = _FlowControl() - - flow_control.set_flow_control_status() - assert flow_control.event.is_set() - - -def test_flow_control_event_is_not_set_when_blocked(): - flow_control = _FlowControl() - - flow_control.inflight_mutations = flow_control.max_mutations - flow_control.inflight_size = flow_control.max_mutation_bytes - - flow_control.set_flow_control_status() - assert not flow_control.event.is_set() - - -@mock.patch("concurrent.futures.ThreadPoolExecutor.submit") -def test_flush_async_batch_count(mocked_executor_submit): - table = _Table(TABLE_NAME) - mutation_batcher = MutationsBatcher(table=table, flush_count=2) - - number_of_bytes = 1 * 1024 * 1024 - max_value = b"1" * number_of_bytes - for index in range(5): - row = DirectRow(row_key=f"row_key_{index}") - row.set_cell("cf1", b"c1", max_value) - mutation_batcher.mutate(row) - mutation_batcher._flush_async() - - # 3 batches submitted. 2 batches of 2 items, and the last one a single item batch. - assert mocked_executor_submit.call_count == 3 - - class _Instance(object): def __init__(self, client=None): self._client = client @@ -247,8 +156,5 @@ def __init__(self, name, client=None): self.mutation_calls = 0 def mutate_rows(self, rows): - from google.rpc.status_pb2 import Status - self.mutation_calls += 1 - - return [Status(code=0) for _ in rows] + return rows From a4e73251c6cd6ce83d9b812120b5321b0ab70280 Mon Sep 17 00:00:00 2001 From: Mattie Fu Date: Thu, 11 May 2023 15:59:32 -0400 Subject: [PATCH 12/52] Revert "fix: Revert "Feat: Threaded MutationsBatcher" (#773)" (#775) This reverts commit a767cff95d990994f85f5fd05cc10f952087b49d. 
--- docs/batcher.rst | 6 + docs/usage.rst | 1 + google/cloud/bigtable/batcher.py | 366 +++++++++++++++++++++++++------ google/cloud/bigtable/table.py | 6 +- tests/unit/test_batcher.py | 218 ++++++++++++------ 5 files changed, 469 insertions(+), 128 deletions(-) create mode 100644 docs/batcher.rst diff --git a/docs/batcher.rst b/docs/batcher.rst new file mode 100644 index 000000000..9ac335be1 --- /dev/null +++ b/docs/batcher.rst @@ -0,0 +1,6 @@ +Mutations Batching +~~~~~~~~~~~~~~~~~~ + +.. automodule:: google.cloud.bigtable.batcher + :members: + :show-inheritance: diff --git a/docs/usage.rst b/docs/usage.rst index 33bf7bb7f..73a32b039 100644 --- a/docs/usage.rst +++ b/docs/usage.rst @@ -17,6 +17,7 @@ Using the API row-data row-filters row-set + batcher In the hierarchy of API concepts diff --git a/google/cloud/bigtable/batcher.py b/google/cloud/bigtable/batcher.py index 3c23f4436..6b06ec060 100644 --- a/google/cloud/bigtable/batcher.py +++ b/google/cloud/bigtable/batcher.py @@ -13,104 +13,251 @@ # limitations under the License. """User friendly container for Google Cloud Bigtable MutationBatcher.""" +import threading +import queue +import concurrent.futures +import atexit -FLUSH_COUNT = 1000 -MAX_MUTATIONS = 100000 -MAX_ROW_BYTES = 5242880 # 5MB +from google.api_core.exceptions import from_grpc_status +from dataclasses import dataclass -class MaxMutationsError(ValueError): - """The number of mutations for bulk request is too big.""" +FLUSH_COUNT = 100 # after this many elements, send out the batch + +MAX_MUTATION_SIZE = 20 * 1024 * 1024 # 20MB # after this many bytes, send out the batch + +MAX_OUTSTANDING_BYTES = 100 * 1024 * 1024 # 100MB # max inflight byte size. + +MAX_OUTSTANDING_ELEMENTS = 100000 # max inflight mutations. + + +class MutationsBatchError(Exception): + """Error in the batch request""" + + def __init__(self, message, exc): + self.exc = exc + self.message = message + super().__init__(self.message) + + +class _MutationsBatchQueue(object): + """Private Threadsafe Queue to hold rows for batching.""" + + def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT): + """Specify the queue constraints""" + self._queue = queue.Queue() + self.total_mutation_count = 0 + self.total_size = 0 + self.max_mutation_bytes = max_mutation_bytes + self.flush_count = flush_count + + def get(self): + """Retrieve an item from the queue. Recalculate queue size.""" + row = self._queue.get() + mutation_size = row.get_mutations_size() + self.total_mutation_count -= len(row._get_mutations()) + self.total_size -= mutation_size + return row + + def put(self, item): + """Insert an item to the queue. Recalculate queue size.""" + + mutation_count = len(item._get_mutations()) + + self._queue.put(item) + + self.total_size += item.get_mutations_size() + self.total_mutation_count += mutation_count + + def full(self): + """Check if the queue is full.""" + if ( + self.total_mutation_count >= self.flush_count + or self.total_size >= self.max_mutation_bytes + ): + return True + return False + + def empty(self): + return self._queue.empty() + + +@dataclass +class _BatchInfo: + """Keeping track of size of a batch""" + + mutations_count: int = 0 + rows_count: int = 0 + mutations_size: int = 0 + + +class _FlowControl(object): + def __init__( + self, + max_mutations=MAX_OUTSTANDING_ELEMENTS, + max_mutation_bytes=MAX_OUTSTANDING_BYTES, + ): + """Control the inflight requests. Keep track of the mutations, row bytes and row counts. 
+        As requests to the backend are made, adjust the number of mutations being processed.
+
+        If a threshold is reached, block the flow.
+        Reopen the flow as requests are finished.
+        """
+        self.max_mutations = max_mutations
+        self.max_mutation_bytes = max_mutation_bytes
+        self.inflight_mutations = 0
+        self.inflight_size = 0
+        self.event = threading.Event()
+        self.event.set()
+
+    def is_blocked(self):
+        """Returns True if either:
+
+        - inflight mutations >= max_mutations, or
+        - inflight bytes size >= max_mutation_bytes
+        """
+
+        return (
+            self.inflight_mutations >= self.max_mutations
+            or self.inflight_size >= self.max_mutation_bytes
+        )
+
+    def control_flow(self, batch_info):
+        """
+        Reserve the resources used by this batch and update the flow-control status.
+        """
+
+        self.inflight_mutations += batch_info.mutations_count
+        self.inflight_size += batch_info.mutations_size
+        self.set_flow_control_status()
+
+    def wait(self):
+        """
+        Wait until flow control pushback has been released.
+        It awakens as soon as `event` is set.
+        """
+        self.event.wait()
+
+    def set_flow_control_status(self):
+        """Check the inflight mutations and size.
+
+        If values exceed the allowed threshold, block the event.
+        """
+        if self.is_blocked():
+            self.event.clear()  # sleep
+        else:
+            self.event.set()  # awaken the threads
+
+    def release(self, batch_info):
+        """
+        Release the resources.
+        Decrement the row size to allow enqueued mutations to be run.
+        """
+        self.inflight_mutations -= batch_info.mutations_count
+        self.inflight_size -= batch_info.mutations_size
+        self.set_flow_control_status()
 
 
 class MutationsBatcher(object):
     """A MutationsBatcher is used in batch cases where the number of mutations
-    is large or unknown. It will store DirectRows in memory until one of the
-    size limits is reached, or an explicit call to flush() is performed. When
-    a flush event occurs, the DirectRows in memory will be sent to Cloud
+    is large or unknown. It will store :class:`DirectRow` in memory until one of the
+    size limits is reached, or an explicit call to :func:`flush()` is performed. When
+    a flush event occurs, the :class:`DirectRow` in memory will be sent to Cloud
     Bigtable. Batching mutations is more efficient than sending individual request.
 
     This class is not suited for usage in systems where each mutation
     must be guaranteed to be sent, since calling mutate may only result in an
-    in-memory change. In a case of a system crash, any DirectRows remaining in
+    in-memory change. In a case of a system crash, any :class:`DirectRow` remaining in
     memory will not necessarily be sent to the service, even after the
-    completion of the mutate() method.
+    completion of the :func:`mutate()` method.
 
-    TODO: Performance would dramatically improve if this class had the
-    capability of asynchronous, parallel RPCs.
+    Note on thread safety: The same :class:`MutationsBatcher` cannot be shared by multiple end-user threads.
 
     :type table: class
     :param table: class:`~google.cloud.bigtable.table.Table`.
 
     :type flush_count: int
     :param flush_count: (Optional) Max number of rows to flush. If it
-        reaches the max number of rows it calls finish_batch() to mutate the
-        current row batch. Default is FLUSH_COUNT (1000 rows).
+        reaches the max number of rows, the current batch is flushed
+        asynchronously. Default is FLUSH_COUNT (100 rows).
 
     :type max_row_bytes: int
     :param max_row_bytes: (Optional) Max number of row mutations size to
-        flush. If it reaches the max number of row mutations size it calls
-        finish_batch() to mutate the current row batch. Default is MAX_ROW_BYTES
-        (5 MB).
+        flush. If it reaches the max size of row mutations, the current
+        batch is flushed asynchronously. Default is MAX_MUTATION_SIZE (20 MB).
+
+    :type flush_interval: float
+    :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush.
+        Default is 1 second.
     """
 
-    def __init__(self, table, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES):
-        self.rows = []
-        self.total_mutation_count = 0
-        self.total_size = 0
+    def __init__(
+        self,
+        table,
+        flush_count=FLUSH_COUNT,
+        max_row_bytes=MAX_MUTATION_SIZE,
+        flush_interval=1,
+    ):
+        self._rows = _MutationsBatchQueue(
+            max_mutation_bytes=max_row_bytes, flush_count=flush_count
+        )
         self.table = table
-        self.flush_count = flush_count
-        self.max_row_bytes = max_row_bytes
+        self._executor = concurrent.futures.ThreadPoolExecutor()
+        atexit.register(self.close)
+        self._timer = threading.Timer(flush_interval, self.flush)
+        self._timer.start()
+        self.flow_control = _FlowControl(
+            max_mutations=MAX_OUTSTANDING_ELEMENTS,
+            max_mutation_bytes=MAX_OUTSTANDING_BYTES,
+        )
+        self.futures_mapping = {}
+        self.exceptions = queue.Queue()
+
+    @property
+    def flush_count(self):
+        return self._rows.flush_count
+
+    @property
+    def max_row_bytes(self):
+        return self._rows.max_mutation_bytes
+
+    def __enter__(self):
+        """Start the MutationsBatcher as a context manager."""
+        return self
 
     def mutate(self, row):
         """Add a row to the batch. If the current batch meets one of the size
-        limits, the batch is sent synchronously.
+        limits, the batch is sent asynchronously.
 
         For example:
 
-        .. literalinclude:: snippets.py
+        .. literalinclude:: snippets_table.py
             :start-after: [START bigtable_api_batcher_mutate]
             :end-before: [END bigtable_api_batcher_mutate]
             :dedent: 4
 
         :type row: class
-        :param row: class:`~google.cloud.bigtable.row.DirectRow`.
+        :param row: :class:`~google.cloud.bigtable.row.DirectRow`.
 
         :raises: One of the following:
-                 * :exc:`~.table._BigtableRetryableError` if any
-                   row returned a transient error.
-                 * :exc:`RuntimeError` if the number of responses doesn't
-                   match the number of rows that were retried
-                 * :exc:`.batcher.MaxMutationsError` if any row exceeds max
-                   mutations count.
-        """
-        mutation_count = len(row._get_mutations())
-        if mutation_count > MAX_MUTATIONS:
-            raise MaxMutationsError(
-                "The row key {} exceeds the number of mutations {}.".format(
-                    row.row_key, mutation_count
-                )
-            )
-
-        if (self.total_mutation_count + mutation_count) >= MAX_MUTATIONS:
-            self.flush()
-
-        self.rows.append(row)
-        self.total_mutation_count += mutation_count
-        self.total_size += row.get_mutations_size()
+            * :exc:`~.table._BigtableRetryableError` if any row returned a transient error.
+            * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried
+        """
+        self._rows.put(row)
 
-        if self.total_size >= self.max_row_bytes or len(self.rows) >= self.flush_count:
-            self.flush()
+        if self._rows.full():
+            self._flush_async()
 
     def mutate_rows(self, rows):
         """Add multiple rows to the batch. If the current batch meets one of the size
-        limits, the batch is sent synchronously.
+        limits, the batch is sent asynchronously.
 
         For example:
 
-        .. literalinclude:: snippets.py
+        .. literalinclude:: snippets_table.py
             :start-after: [START bigtable_api_batcher_mutate_rows]
            :end-before: [END bigtable_api_batcher_mutate_rows]
            :dedent: 4
@@ -119,28 +266,119 @@ def mutate_rows(self, rows):
        :param rows: list:[`~google.cloud.bigtable.row.DirectRow`].
        :raises: One of the following:
-                 * :exc:`~.table._BigtableRetryableError` if any
-                   row returned a transient error.
-                 * :exc:`RuntimeError` if the number of responses doesn't
-                   match the number of rows that were retried
-                 * :exc:`.batcher.MaxMutationsError` if any row exceeds max
-                   mutations count.
+            * :exc:`~.table._BigtableRetryableError` if any row returned a transient error.
+            * :exc:`RuntimeError` if the number of responses doesn't match the number of rows that were retried
         """
         for row in rows:
             self.mutate(row)
 
     def flush(self):
-        """Sends the current. batch to Cloud Bigtable.
+        """Sends the current batch to Cloud Bigtable synchronously.
 
         For example:
 
-        .. literalinclude:: snippets.py
+        .. literalinclude:: snippets_table.py
             :start-after: [START bigtable_api_batcher_flush]
             :end-before: [END bigtable_api_batcher_flush]
             :dedent: 4
 
+        :raises:
+            * :exc:`.batcher.MutationsBatchError` if there's any error in the mutations.
+        """
+        rows_to_flush = []
+        while not self._rows.empty():
+            rows_to_flush.append(self._rows.get())
+        response = self._flush_rows(rows_to_flush)
+        return response
+
+    def _flush_async(self):
+        """Sends the current batch to Cloud Bigtable asynchronously.
+
+        :raises:
+            * :exc:`.batcher.MutationsBatchError` if there's any error in the mutations.
+        """
+
+        rows_to_flush = []
+        mutations_count = 0
+        mutations_size = 0
+        rows_count = 0
+        batch_info = _BatchInfo()
+
+        while not self._rows.empty():
+            row = self._rows.get()
+            mutations_count += len(row._get_mutations())
+            mutations_size += row.get_mutations_size()
+            rows_count += 1
+            rows_to_flush.append(row)
+            batch_info.mutations_count = mutations_count
+            batch_info.rows_count = rows_count
+            batch_info.mutations_size = mutations_size
+
+            if (
+                rows_count >= self.flush_count
+                or mutations_size >= self.max_row_bytes
+                or mutations_count >= self.flow_control.max_mutations
+                or mutations_size >= self.flow_control.max_mutation_bytes
+                or self._rows.empty()  # submit when it reached the end of the queue
+            ):
+                # wait for resources to become available, before submitting any new batch
+                self.flow_control.wait()
+                # once unblocked, submit a batch
+                # control_flow may clear the event flag to block subsequent batches, but it does not block this one
+                self.flow_control.control_flow(batch_info)
+                future = self._executor.submit(self._flush_rows, rows_to_flush)
+                self.futures_mapping[future] = batch_info
+                future.add_done_callback(self._batch_completed_callback)
+
+                # reset and start a new batch
+                rows_to_flush = []
+                mutations_size = 0
+                rows_count = 0
+                mutations_count = 0
+                batch_info = _BatchInfo()
+
+    def _batch_completed_callback(self, future):
+        """Callback for when the mutation has finished.
+
+        Raise exceptions if there's any.
+        Release the resources locked by the flow control and allow enqueued tasks to be run.
+        """
+
+        processed_rows = self.futures_mapping[future]
+        self.flow_control.release(processed_rows)
+        del self.futures_mapping[future]
+
+    def _flush_rows(self, rows_to_flush):
+        """Mutate the specified rows.
+
+        :raises:
+            * :exc:`.batcher.MutationsBatchError` if there's any error in the mutations.
+        """
+        responses = []
+        if len(rows_to_flush) > 0:
+            response = self.table.mutate_rows(rows_to_flush)
+
+            for result in response:
+                if result.code != 0:
+                    exc = from_grpc_status(result.code, result.message)
+                    self.exceptions.put(exc)
+                responses.append(result)
+
+        return responses
+
+    def __exit__(self, exc_type, exc_value, exc_traceback):
+        """Clean up resources. Flush and shut down the ThreadPoolExecutor."""
+        self.close()
+
+    def close(self):
+        """Clean up resources. Flush and shut down the ThreadPoolExecutor.
+        Any errors will be raised.
+
+        :raises:
+            * :exc:`.batcher.MutationsBatchError` if there's any error in the mutations.
         """
-        if len(self.rows) != 0:
-            self.table.mutate_rows(self.rows)
-            self.total_mutation_count = 0
-            self.total_size = 0
-            self.rows = []
+        self.flush()
+        self._executor.shutdown(wait=True)
+        atexit.unregister(self.close)
+        if self.exceptions.qsize() > 0:
+            exc = list(self.exceptions.queue)
+            raise MutationsBatchError("Errors in batch mutations.", exc=exc)
diff --git a/google/cloud/bigtable/table.py b/google/cloud/bigtable/table.py
index 8605992ba..e3191a729 100644
--- a/google/cloud/bigtable/table.py
+++ b/google/cloud/bigtable/table.py
@@ -32,7 +32,7 @@
 from google.cloud.bigtable.column_family import _gc_rule_from_pb
 from google.cloud.bigtable.column_family import ColumnFamily
 from google.cloud.bigtable.batcher import MutationsBatcher
-from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_ROW_BYTES
+from google.cloud.bigtable.batcher import FLUSH_COUNT, MAX_MUTATION_SIZE
 from google.cloud.bigtable.encryption_info import EncryptionInfo
 from google.cloud.bigtable.policy import Policy
 from google.cloud.bigtable.row import AppendRow
@@ -844,7 +844,9 @@ def drop_by_prefix(self, row_key_prefix, timeout=None):
             request={"name": self.name, "row_key_prefix": _to_bytes(row_key_prefix)}
         )
 
-    def mutations_batcher(self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_ROW_BYTES):
+    def mutations_batcher(
+        self, flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE
+    ):
         """Factory to create a mutation batcher associated with this instance.
 
         For example:
diff --git a/tests/unit/test_batcher.py b/tests/unit/test_batcher.py
index 9ae6ed175..a238b2852 100644
--- a/tests/unit/test_batcher.py
+++ b/tests/unit/test_batcher.py
@@ -14,122 +14,118 @@
 
 import mock
 
+import time
+
 import pytest
 
 from google.cloud.bigtable.row import DirectRow
+from google.cloud.bigtable.batcher import (
+    _FlowControl,
+    MutationsBatcher,
+    MutationsBatchError,
+)
 
 TABLE_ID = "table-id"
 TABLE_NAME = "/tables/" + TABLE_ID
 
 
-def _make_mutation_batcher(table, **kw):
-    from google.cloud.bigtable.batcher import MutationsBatcher
-
-    return MutationsBatcher(table, **kw)
-
-
 def test_mutation_batcher_constructor():
     table = _Table(TABLE_NAME)
-
-    mutation_batcher = _make_mutation_batcher(table)
-    assert table is mutation_batcher.table
+    with MutationsBatcher(table) as mutation_batcher:
+        assert table is mutation_batcher.table
 
 
 def test_mutation_batcher_mutate_row():
     table = _Table(TABLE_NAME)
-    mutation_batcher = _make_mutation_batcher(table=table)
+    with MutationsBatcher(table=table) as mutation_batcher:
 
-    rows = [
-        DirectRow(row_key=b"row_key"),
-        DirectRow(row_key=b"row_key_2"),
-        DirectRow(row_key=b"row_key_3"),
-        DirectRow(row_key=b"row_key_4"),
-    ]
+        rows = [
+            DirectRow(row_key=b"row_key"),
+            DirectRow(row_key=b"row_key_2"),
+            DirectRow(row_key=b"row_key_3"),
+            DirectRow(row_key=b"row_key_4"),
+        ]
 
-    mutation_batcher.mutate_rows(rows)
-    mutation_batcher.flush()
+        mutation_batcher.mutate_rows(rows)
 
     assert table.mutation_calls == 1
 
 
 def test_mutation_batcher_mutate():
     table = _Table(TABLE_NAME)
-    mutation_batcher = _make_mutation_batcher(table=table)
+    with MutationsBatcher(table=table) as mutation_batcher:
 
-    row = DirectRow(row_key=b"row_key")
-    row.set_cell("cf1", b"c1", 1)
-    row.set_cell("cf1", b"c2", 2)
-    row.set_cell("cf1", b"c3", 3)
-    row.set_cell("cf1", b"c4", 4)
-
-    
mutation_batcher.mutate(row) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) + row.set_cell("cf1", b"c4", 4) - mutation_batcher.flush() + mutation_batcher.mutate(row) assert table.mutation_calls == 1 def test_mutation_batcher_flush_w_no_rows(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) - mutation_batcher.flush() + with MutationsBatcher(table=table) as mutation_batcher: + mutation_batcher.flush() assert table.mutation_calls == 0 def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table, flush_count=3) + with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: - row_1 = DirectRow(row_key=b"row_key_1") - row_2 = DirectRow(row_key=b"row_key_2") - row_3 = DirectRow(row_key=b"row_key_3") + row_1 = DirectRow(row_key=b"row_key_1") + row_2 = DirectRow(row_key=b"row_key_2") + row_3 = DirectRow(row_key=b"row_key_3") - mutation_batcher.mutate(row_1) - mutation_batcher.mutate(row_2) - mutation_batcher.mutate(row_3) + mutation_batcher.mutate(row_1) + mutation_batcher.mutate(row_2) + mutation_batcher.mutate(row_3) assert table.mutation_calls == 1 -@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_with_max_mutations_failure(): - from google.cloud.bigtable.batcher import MaxMutationsError - +@mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", new=3) +def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) - row.set_cell("cf1", b"c4", 4) + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", 1) + row.set_cell("cf1", b"c2", 2) + row.set_cell("cf1", b"c3", 3) - with pytest.raises(MaxMutationsError): mutation_batcher.mutate(row) + assert table.mutation_calls == 1 + -@mock.patch("google.cloud.bigtable.batcher.MAX_MUTATIONS", new=3) -def test_mutation_batcher_mutate_w_max_mutations(): +def test_mutation_batcher_mutate_w_max_row_bytes(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher(table=table) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: - row = DirectRow(row_key=b"row_key") - row.set_cell("cf1", b"c1", 1) - row.set_cell("cf1", b"c2", 2) - row.set_cell("cf1", b"c3", 3) + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes - mutation_batcher.mutate(row) - mutation_batcher.flush() + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + row.set_cell("cf1", b"c3", max_value) + + mutation_batcher.mutate(row) assert table.mutation_calls == 1 -def test_mutation_batcher_mutate_w_max_row_bytes(): +def test_mutations_batcher_flushed_when_closed(): table = _Table(TABLE_NAME) - mutation_batcher = _make_mutation_batcher( - table=table, max_row_bytes=3 * 1024 * 1024 - ) + mutation_batcher = MutationsBatcher(table=table, max_row_bytes=3 * 1024 * 1024) number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -137,13 +133,108 @@ def test_mutation_batcher_mutate_w_max_row_bytes(): row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", max_value) row.set_cell("cf1", b"c2", max_value) - 
row.set_cell("cf1", b"c3", max_value) mutation_batcher.mutate(row) + assert table.mutation_calls == 0 + + mutation_batcher.close() + + assert table.mutation_calls == 1 + + +def test_mutations_batcher_context_manager_flushed_when_closed(): + table = _Table(TABLE_NAME) + with MutationsBatcher( + table=table, max_row_bytes=3 * 1024 * 1024 + ) as mutation_batcher: + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + + row = DirectRow(row_key=b"row_key") + row.set_cell("cf1", b"c1", max_value) + row.set_cell("cf1", b"c2", max_value) + + mutation_batcher.mutate(row) assert table.mutation_calls == 1 +@mock.patch("google.cloud.bigtable.batcher.MutationsBatcher.flush") +def test_mutations_batcher_flush_interval(mocked_flush): + table = _Table(TABLE_NAME) + flush_interval = 0.5 + mutation_batcher = MutationsBatcher(table=table, flush_interval=flush_interval) + + assert mutation_batcher._timer.interval == flush_interval + mocked_flush.assert_not_called() + + time.sleep(0.4) + mocked_flush.assert_not_called() + + time.sleep(0.1) + mocked_flush.assert_called_once_with() + + mutation_batcher.close() + + +def test_mutations_batcher_response_with_error_codes(): + from google.rpc.status_pb2 import Status + + mocked_response = [Status(code=1), Status(code=5)] + + with mock.patch("tests.unit.test_batcher._Table") as mocked_table: + table = mocked_table.return_value + mutation_batcher = MutationsBatcher(table=table) + + row1 = DirectRow(row_key=b"row_key") + row2 = DirectRow(row_key=b"row_key") + table.mutate_rows.return_value = mocked_response + + mutation_batcher.mutate_rows([row1, row2]) + with pytest.raises(MutationsBatchError) as exc: + mutation_batcher.close() + assert exc.value.message == "Errors in batch mutations." + assert len(exc.value.exc) == 2 + + assert exc.value.exc[0].message == mocked_response[0].message + assert exc.value.exc[1].message == mocked_response[1].message + + +def test_flow_control_event_is_set_when_not_blocked(): + flow_control = _FlowControl() + + flow_control.set_flow_control_status() + assert flow_control.event.is_set() + + +def test_flow_control_event_is_not_set_when_blocked(): + flow_control = _FlowControl() + + flow_control.inflight_mutations = flow_control.max_mutations + flow_control.inflight_size = flow_control.max_mutation_bytes + + flow_control.set_flow_control_status() + assert not flow_control.event.is_set() + + +@mock.patch("concurrent.futures.ThreadPoolExecutor.submit") +def test_flush_async_batch_count(mocked_executor_submit): + table = _Table(TABLE_NAME) + mutation_batcher = MutationsBatcher(table=table, flush_count=2) + + number_of_bytes = 1 * 1024 * 1024 + max_value = b"1" * number_of_bytes + for index in range(5): + row = DirectRow(row_key=f"row_key_{index}") + row.set_cell("cf1", b"c1", max_value) + mutation_batcher.mutate(row) + mutation_batcher._flush_async() + + # 3 batches submitted. 2 batches of 2 items, and the last one a single item batch. 
+ assert mocked_executor_submit.call_count == 3 + + class _Instance(object): def __init__(self, client=None): self._client = client @@ -156,5 +247,8 @@ def __init__(self, name, client=None): self.mutation_calls = 0 def mutate_rows(self, rows): + from google.rpc.status_pb2 import Status + self.mutation_calls += 1 - return rows + + return [Status(code=0) for _ in rows] From 9e35ad2a4b0479f90f8e0353e0ac3a46185298d2 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Tue, 16 May 2023 11:56:46 +0200 Subject: [PATCH 13/52] chore(main): release 2.18.1 (#774) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .release-please-manifest.json | 2 +- CHANGELOG.md | 7 +++++++ google/cloud/bigtable/gapic_version.py | 2 +- google/cloud/bigtable_admin/gapic_version.py | 2 +- google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index a627e662e..e7a7a136b 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.18.0" + ".": "2.18.1" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 2d7fe5141..d56f02896 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,13 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.18.1](https://github.com/googleapis/python-bigtable/compare/v2.18.0...v2.18.1) (2023-05-11) + + +### Bug Fixes + +* Revert "Feat: Threaded MutationsBatcher" ([#773](https://github.com/googleapis/python-bigtable/issues/773)) ([a767cff](https://github.com/googleapis/python-bigtable/commit/a767cff95d990994f85f5fd05cc10f952087b49d)) + ## [2.18.0](https://github.com/googleapis/python-bigtable/compare/v2.17.0...v2.18.0) (2023-05-10) diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py index f09943f6b..e1b4da1de 100644 --- a/google/cloud/bigtable/gapic_version.py +++ b/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.0" # {x-release-please-version} +__version__ = "2.18.1" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py index f09943f6b..e1b4da1de 100644 --- a/google/cloud/bigtable_admin/gapic_version.py +++ b/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.0" # {x-release-please-version} +__version__ = "2.18.1" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py index f09943f6b..e1b4da1de 100644 --- a/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.18.0" # {x-release-please-version} +__version__ = "2.18.1" # {x-release-please-version} diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py index f09943f6b..e1b4da1de 100644 --- a/google/cloud/bigtable_v2/gapic_version.py +++ b/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.0" # {x-release-please-version} +__version__ = "2.18.1" # {x-release-please-version} From cef70f243541820225f86a520e0b2abd3a7354f7 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 10:31:36 -0400 Subject: [PATCH 14/52] feat: add ChangeStreamConfig to CreateTable and UpdateTable (#786) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add ChangeStreamConfig to CreateTable and UpdateTable PiperOrigin-RevId: 534836567 Source-Link: https://github.com/googleapis/googleapis/commit/eb2d1f1555df526abd00aa475e8fd5d014af6489 Source-Link: https://github.com/googleapis/googleapis-gen/commit/64cebcfc2765bff5afb19c140d4b1600dfdaebad Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNjRjZWJjZmMyNzY1YmZmNWFmYjE5YzE0MGQ0YjE2MDBkZmRhZWJhZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/cloud/bigtable_admin/__init__.py | 2 ++ google/cloud/bigtable_admin_v2/__init__.py | 2 ++ .../bigtable_table_admin/async_client.py | 13 +++++--- .../services/bigtable_table_admin/client.py | 13 +++++--- .../cloud/bigtable_admin_v2/types/__init__.py | 2 ++ .../types/bigtable_table_admin.py | 20 +++++++----- google/cloud/bigtable_admin_v2/types/table.py | 31 +++++++++++++++++++ .../test_bigtable_table_admin.py | 2 ++ 8 files changed, 67 insertions(+), 18 deletions(-) diff --git a/google/cloud/bigtable_admin/__init__.py b/google/cloud/bigtable_admin/__init__.py index 6ddc6acb2..0ba93ec63 100644 --- a/google/cloud/bigtable_admin/__init__.py +++ b/google/cloud/bigtable_admin/__init__.py @@ -200,6 +200,7 @@ from google.cloud.bigtable_admin_v2.types.instance import Instance from google.cloud.bigtable_admin_v2.types.table import Backup from google.cloud.bigtable_admin_v2.types.table import BackupInfo +from google.cloud.bigtable_admin_v2.types.table import ChangeStreamConfig from google.cloud.bigtable_admin_v2.types.table import ColumnFamily from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo from google.cloud.bigtable_admin_v2.types.table import GcRule @@ -282,6 +283,7 @@ "Instance", "Backup", "BackupInfo", + "ChangeStreamConfig", "ColumnFamily", "EncryptionInfo", "GcRule", diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index 282834fe7..c030ec1bd 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -92,6 +92,7 @@ from .types.instance import Instance from .types.table import Backup from .types.table import BackupInfo +from .types.table import ChangeStreamConfig from .types.table import ColumnFamily from .types.table import EncryptionInfo from .types.table import GcRule @@ -110,6 +111,7 @@ "BackupInfo", "BigtableInstanceAdminClient", "BigtableTableAdminClient", + "ChangeStreamConfig", "CheckConsistencyRequest", "CheckConsistencyResponse", "Cluster", diff --git 
a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 91f059f8b..1663c16eb 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -683,16 +683,19 @@ async def update_table( should not be set. update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): Required. The list of fields to update. A mask - specifying which fields (e.g. ``deletion_protection``) + specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request message. The wildcard (*) path is currently not supported. Currently UpdateTable is only supported for - the following field: + the following fields: - - ``deletion_protection`` If ``column_families`` is set - in ``update_mask``, it will return an UNIMPLEMENTED - error. + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it + will return an UNIMPLEMENTED error. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index efceae90a..e043aa224 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -992,16 +992,19 @@ def update_table( should not be set. update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The list of fields to update. A mask - specifying which fields (e.g. ``deletion_protection``) + specifying which fields (e.g. ``change_stream_config``) in the ``table`` field should be updated. This mask is relative to the ``table`` field, not to the request message. The wildcard (*) path is currently not supported. Currently UpdateTable is only supported for - the following field: + the following fields: - - ``deletion_protection`` If ``column_families`` is set - in ``update_mask``, it will return an UNIMPLEMENTED - error. + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it + will return an UNIMPLEMENTED error. This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py index 5a66ddf09..69153c9fc 100644 --- a/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/google/cloud/bigtable_admin_v2/types/__init__.py @@ -91,6 +91,7 @@ from .table import ( Backup, BackupInfo, + ChangeStreamConfig, ColumnFamily, EncryptionInfo, GcRule, @@ -170,6 +171,7 @@ "Instance", "Backup", "BackupInfo", + "ChangeStreamConfig", "ColumnFamily", "EncryptionInfo", "GcRule", diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 9b236fea9..4c4b9e9e2 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -460,14 +460,18 @@ class UpdateTableRequest(proto.Message): used to identify the table to update. 
update_mask (google.protobuf.field_mask_pb2.FieldMask): Required. The list of fields to update. A mask specifying - which fields (e.g. ``deletion_protection``) in the ``table`` - field should be updated. This mask is relative to the - ``table`` field, not to the request message. The wildcard - (*) path is currently not supported. Currently UpdateTable - is only supported for the following field: - - - ``deletion_protection`` If ``column_families`` is set in - ``update_mask``, it will return an UNIMPLEMENTED error. + which fields (e.g. ``change_stream_config``) in the + ``table`` field should be updated. This mask is relative to + the ``table`` field, not to the request message. The + wildcard (*) path is currently not supported. Currently + UpdateTable is only supported for the following fields: + + - ``change_stream_config`` + - ``change_stream_config.retention_period`` + - ``deletion_protection`` + + If ``column_families`` is set in ``update_mask``, it will + return an UNIMPLEMENTED error. """ table: gba_table.Table = proto.Field( diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index fd936df63..16d136e16 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -29,6 +29,7 @@ manifest={ "RestoreSourceType", "RestoreInfo", + "ChangeStreamConfig", "Table", "ColumnFamily", "GcRule", @@ -82,6 +83,27 @@ class RestoreInfo(proto.Message): ) +class ChangeStreamConfig(proto.Message): + r"""Change stream configuration. + + Attributes: + retention_period (google.protobuf.duration_pb2.Duration): + How long the change stream should be + retained. Change stream data older than the + retention period will not be returned when + reading the change stream from the table. + Values must be at least 1 day and at most 7 + days, and will be truncated to microsecond + granularity. + """ + + retention_period: duration_pb2.Duration = proto.Field( + proto.MESSAGE, + number=1, + message=duration_pb2.Duration, + ) + + class Table(proto.Message): r"""A collection of user data indexed by row, column, and timestamp. Each table is served using the resources of its @@ -114,6 +136,10 @@ class Table(proto.Message): another data source (e.g. a backup), this field will be populated with information about the restore. + change_stream_config (google.cloud.bigtable_admin_v2.types.ChangeStreamConfig): + If specified, enable the change stream on + this table. Otherwise, the change stream is + disabled and the change stream is not retained. deletion_protection (bool): Set to true to make the table protected against data loss. i.e. 
deleting the following @@ -263,6 +289,11 @@ class ReplicationState(proto.Enum): number=6, message="RestoreInfo", ) + change_stream_config: "ChangeStreamConfig" = proto.Field( + proto.MESSAGE, + number=8, + message="ChangeStreamConfig", + ) deletion_protection: bool = proto.Field( proto.BOOL, number=9, diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 8e4004ab1..8498e4fa5 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -8202,6 +8202,7 @@ def test_update_table_rest(request_type): "source_table": "source_table_value", }, }, + "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, } request = request_type(**request_init) @@ -8399,6 +8400,7 @@ def test_update_table_rest_bad_request( "source_table": "source_table_value", }, }, + "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, } request = request_type(**request_init) From ae477b5c0fed6f583d8bf98b05c9414c009e26d9 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 25 May 2023 12:38:11 -0400 Subject: [PATCH 15/52] build(deps): bump requests from 2.28.1 to 2.31.0 in /synthtool/gcp/templates/python_library/.kokoro (#790) Source-Link: https://github.com/googleapis/synthtool/commit/30bd01b4ab78bf1b2a425816e15b3e7e090993dd Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:9bc5fa3b62b091f60614c08a7fb4fd1d3e1678e326f34dd66ce1eefb5dc3267b Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 3 ++- .kokoro/requirements.txt | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index b8edda51c..32b3c4865 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,4 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:2e247c7bf5154df7f98cce087a20ca7605e236340c7d6d1a14447e5c06791bd6 + digest: sha256:9bc5fa3b62b091f60614c08a7fb4fd1d3e1678e326f34dd66ce1eefb5dc3267b +# created: 2023-05-25T14:56:16.294623272Z diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 66a2172a7..3b8d7ee81 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -419,9 +419,9 @@ readme-renderer==37.3 \ --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \ --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343 # via twine -requests==2.28.1 \ - --hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \ - --hash=sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349 +requests==2.31.0 \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 # via # gcp-releasetool # google-api-core From ecf539c4c976fd9e5505b8abf0b697b218f09fef Mon Sep 17 00:00:00 2001 From: Sita Lakshmi Sangameswaran Date: Thu, 25 May 2023 22:29:46 +0530 Subject: [PATCH 16/52] docs(samples): add region tags (#788) * docs(samples): add read table snippet * remove snippet as it already exists --- samples/hello/main.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/samples/hello/main.py b/samples/hello/main.py index 7b2b1764a..5e47b4a38 100644 --- a/samples/hello/main.py +++ b/samples/hello/main.py @@ -87,26 +87,30 @@ def main(project_id, instance_id, table_id): # [START bigtable_hw_create_filter] # Create a filter to only retrieve the most recent version of the cell - # for each column accross entire row. + # for each column across entire row. 
row_filter = row_filters.CellsColumnLimitFilter(1) # [END bigtable_hw_create_filter] # [START bigtable_hw_get_with_filter] + # [START bigtable_hw_get_by_key] print("Getting a single greeting by row key.") key = "greeting0".encode() row = table.read_row(key, row_filter) cell = row.cells[column_family_id][column][0] print(cell.value.decode("utf-8")) + # [END bigtable_hw_get_by_key] # [END bigtable_hw_get_with_filter] # [START bigtable_hw_scan_with_filter] + # [START bigtable_hw_scan_all] print("Scanning for all greetings:") partial_rows = table.read_rows(filter_=row_filter) for row in partial_rows: cell = row.cells[column_family_id][column][0] print(cell.value.decode("utf-8")) + # [END bigtable_hw_scan_all] # [END bigtable_hw_scan_with_filter] # [START bigtable_hw_delete_table] From c4fd78f92d8bd04df8214a2c6f0a9e84da5c15b5 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 3 Jun 2023 18:32:05 -0400 Subject: [PATCH 17/52] build(deps): bump cryptography from 39.0.1 to 41.0.0 in /synthtool/gcp/templates/python_library/.kokoro (#793) Source-Link: https://github.com/googleapis/synthtool/commit/d0f51a0c2a9a6bcca86911eabea9e484baadf64b Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:240b5bcc2bafd450912d2da2be15e62bc6de2cf839823ae4bf94d4f392b451dc Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .kokoro/requirements.txt | 42 +++++++++++++++++++-------------------- 2 files changed, 22 insertions(+), 24 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 32b3c4865..02a4dedce 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:9bc5fa3b62b091f60614c08a7fb4fd1d3e1678e326f34dd66ce1eefb5dc3267b -# created: 2023-05-25T14:56:16.294623272Z + digest: sha256:240b5bcc2bafd450912d2da2be15e62bc6de2cf839823ae4bf94d4f392b451dc +# created: 2023-06-03T21:25:37.968717478Z diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 3b8d7ee81..c7929db6d 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -113,28 +113,26 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==39.0.1 \ - --hash=sha256:0f8da300b5c8af9f98111ffd512910bc792b4c77392a9523624680f7956a99d4 \ - --hash=sha256:35f7c7d015d474f4011e859e93e789c87d21f6f4880ebdc29896a60403328f1f \ - --hash=sha256:5aa67414fcdfa22cf052e640cb5ddc461924a045cacf325cd164e65312d99502 \ - --hash=sha256:5d2d8b87a490bfcd407ed9d49093793d0f75198a35e6eb1a923ce1ee86c62b41 \ - --hash=sha256:6687ef6d0a6497e2b58e7c5b852b53f62142cfa7cd1555795758934da363a965 \ - --hash=sha256:6f8ba7f0328b79f08bdacc3e4e66fb4d7aab0c3584e0bd41328dce5262e26b2e \ - --hash=sha256:706843b48f9a3f9b9911979761c91541e3d90db1ca905fd63fee540a217698bc \ - --hash=sha256:807ce09d4434881ca3a7594733669bd834f5b2c6d5c7e36f8c00f691887042ad \ - --hash=sha256:83e17b26de248c33f3acffb922748151d71827d6021d98c70e6c1a25ddd78505 \ - --hash=sha256:96f1157a7c08b5b189b16b47bc9db2332269d6680a196341bf30046330d15388 \ - --hash=sha256:aec5a6c9864be7df2240c382740fcf3b96928c46604eaa7f3091f58b878c0bb6 \ - --hash=sha256:b0afd054cd42f3d213bf82c629efb1ee5f22eba35bf0eec88ea9ea7304f511a2 \ - --hash=sha256:ced4e447ae29ca194449a3f1ce132ded8fcab06971ef5f618605aacaa612beac \ 
- --hash=sha256:d1f6198ee6d9148405e49887803907fe8962a23e6c6f83ea7d98f1c0de375695 \
- --hash=sha256:e124352fd3db36a9d4a21c1aa27fd5d051e621845cb87fb851c08f4f75ce8be6 \
- --hash=sha256:e422abdec8b5fa8462aa016786680720d78bdce7a30c652b7fadf83a4ba35336 \
- --hash=sha256:ef8b72fa70b348724ff1218267e7f7375b8de4e8194d1636ee60510aae104cd0 \
- --hash=sha256:f0c64d1bd842ca2633e74a1a28033d139368ad959872533b1bab8c80e8240a0c \
- --hash=sha256:f24077a3b5298a5a06a8e0536e3ea9ec60e4c7ac486755e5fb6e6ea9b3500106 \
- --hash=sha256:fdd188c8a6ef8769f148f88f859884507b954cc64db6b52f66ef199bb9ad660a \
- --hash=sha256:fe913f20024eb2cb2f323e42a64bdf2911bb9738a15dba7d3cce48151034e3a8
+cryptography==41.0.0 \
+ --hash=sha256:0ddaee209d1cf1f180f1efa338a68c4621154de0afaef92b89486f5f96047c55 \
+ --hash=sha256:14754bcdae909d66ff24b7b5f166d69340ccc6cb15731670435efd5719294895 \
+ --hash=sha256:344c6de9f8bda3c425b3a41b319522ba3208551b70c2ae00099c205f0d9fd3be \
+ --hash=sha256:34d405ea69a8b34566ba3dfb0521379b210ea5d560fafedf9f800a9a94a41928 \
+ --hash=sha256:3680248309d340fda9611498a5319b0193a8dbdb73586a1acf8109d06f25b92d \
+ --hash=sha256:3c5ef25d060c80d6d9f7f9892e1d41bb1c79b78ce74805b8cb4aa373cb7d5ec8 \
+ --hash=sha256:4ab14d567f7bbe7f1cdff1c53d5324ed4d3fc8bd17c481b395db224fb405c237 \
+ --hash=sha256:5c1f7293c31ebc72163a9a0df246f890d65f66b4a40d9ec80081969ba8c78cc9 \
+ --hash=sha256:6b71f64beeea341c9b4f963b48ee3b62d62d57ba93eb120e1196b31dc1025e78 \
+ --hash=sha256:7d92f0248d38faa411d17f4107fc0bce0c42cae0b0ba5415505df72d751bf62d \
+ --hash=sha256:8362565b3835ceacf4dc8f3b56471a2289cf51ac80946f9087e66dc283a810e0 \
+ --hash=sha256:84a165379cb9d411d58ed739e4af3396e544eac190805a54ba2e0322feb55c46 \
+ --hash=sha256:88ff107f211ea696455ea8d911389f6d2b276aabf3231bf72c8853d22db755c5 \
+ --hash=sha256:9f65e842cb02550fac96536edb1d17f24c0a338fd84eaf582be25926e993dde4 \
+ --hash=sha256:a4fc68d1c5b951cfb72dfd54702afdbbf0fb7acdc9b7dc4301bbf2225a27714d \
+ --hash=sha256:b7f2f5c525a642cecad24ee8670443ba27ac1fab81bba4cc24c7b6b41f2d0c75 \
+ --hash=sha256:b846d59a8d5a9ba87e2c3d757ca019fa576793e8758174d3868aecb88d6fc8eb \
+ --hash=sha256:bf8fc66012ca857d62f6a347007e166ed59c0bc150cefa49f28376ebe7d992a2 \
+ --hash=sha256:f5d0bf9b252f30a31664b6f64432b4730bb7038339bd18b1fafe129cfc2be9be
    # via
    #   gcp-releasetool
    #   secretstorage

From 589aa5d04f6b5a2bd310d0bf06aeb7058fb6fcd2 Mon Sep 17 00:00:00 2001
From: Mattie Fu
Date: Thu, 8 Jun 2023 16:36:43 -0400
Subject: [PATCH 18/52] fix: add a callback function on flush_rows (#796)

* fix: add a callback function on flush_rows

* reformat

* address comments

* update doc

* update names

* add a test
---
 google/cloud/bigtable/batcher.py | 13 ++++++++++++-
 tests/unit/test_batcher.py       | 21 +++++++++++++++++++++
 2 files changed, 33 insertions(+), 1 deletion(-)

diff --git a/google/cloud/bigtable/batcher.py b/google/cloud/bigtable/batcher.py
index 6b06ec060..a6eb806e9 100644
--- a/google/cloud/bigtable/batcher.py
+++ b/google/cloud/bigtable/batcher.py
@@ -192,6 +192,11 @@ class MutationsBatcher(object):
     :type flush_interval: float
     :param flush_interval: (Optional) The interval (in seconds) between asynchronous flush.
         Default is 1 second.
+
+    :type batch_completed_callback: Callable[list:[`~google.rpc.status_pb2.Status`]] = None
+    :param batch_completed_callback: (Optional) A callable for handling responses
+        after the current batch is sent. The callable function expects a list of grpc
+        Status. 
""" def __init__( @@ -200,6 +205,7 @@ def __init__( flush_count=FLUSH_COUNT, max_row_bytes=MAX_MUTATION_SIZE, flush_interval=1, + batch_completed_callback=None, ): self._rows = _MutationsBatchQueue( max_mutation_bytes=max_row_bytes, flush_count=flush_count @@ -215,6 +221,7 @@ def __init__( ) self.futures_mapping = {} self.exceptions = queue.Queue() + self._user_batch_completed_callback = batch_completed_callback @property def flush_count(self): @@ -337,7 +344,8 @@ def _flush_async(self): batch_info = _BatchInfo() def _batch_completed_callback(self, future): - """Callback for when the mutation has finished. + """Callback for when the mutation has finished to clean up the current batch + and release items from the flow controller. Raise exceptions if there's any. Release the resources locked by the flow control and allow enqueued tasks to be run. @@ -357,6 +365,9 @@ def _flush_rows(self, rows_to_flush): if len(rows_to_flush) > 0: response = self.table.mutate_rows(rows_to_flush) + if self._user_batch_completed_callback: + self._user_batch_completed_callback(response) + for result in response: if result.code != 0: exc = from_grpc_status(result.code, result.message) diff --git a/tests/unit/test_batcher.py b/tests/unit/test_batcher.py index a238b2852..998748141 100644 --- a/tests/unit/test_batcher.py +++ b/tests/unit/test_batcher.py @@ -35,6 +35,27 @@ def test_mutation_batcher_constructor(): assert table is mutation_batcher.table +def test_mutation_batcher_w_user_callback(): + table = _Table(TABLE_NAME) + + def callback_fn(response): + callback_fn.count = len(response) + + with MutationsBatcher( + table, flush_count=1, batch_completed_callback=callback_fn + ) as mutation_batcher: + rows = [ + DirectRow(row_key=b"row_key"), + DirectRow(row_key=b"row_key_2"), + DirectRow(row_key=b"row_key_3"), + DirectRow(row_key=b"row_key_4"), + ] + + mutation_batcher.mutate_rows(rows) + + assert callback_fn.count == 4 + + def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: From fbe6acebd265c243508b6a08f0743bd55d144fb7 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Thu, 8 Jun 2023 18:02:10 -0400 Subject: [PATCH 19/52] chore(main): release 2.19.0 (#789) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .release-please-manifest.json | 2 +- CHANGELOG.md | 17 +++++++++++++++++ google/cloud/bigtable/gapic_version.py | 2 +- google/cloud/bigtable_admin/gapic_version.py | 2 +- google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 22 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index e7a7a136b..b7f666a68 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.18.1" + ".": "2.19.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index d56f02896..dc80386a4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,23 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.19.0](https://github.com/googleapis/python-bigtable/compare/v2.18.1...v2.19.0) (2023-06-08) + + +### Features + +* Add ChangeStreamConfig to CreateTable and UpdateTable ([#786](https://github.com/googleapis/python-bigtable/issues/786)) ([cef70f2](https://github.com/googleapis/python-bigtable/commit/cef70f243541820225f86a520e0b2abd3a7354f7)) + + +### Bug Fixes + +* 
Add a callback function on flush_rows ([#796](https://github.com/googleapis/python-bigtable/issues/796)) ([589aa5d](https://github.com/googleapis/python-bigtable/commit/589aa5d04f6b5a2bd310d0bf06aeb7058fb6fcd2)) + + +### Documentation + +* **samples:** Add region tags ([#788](https://github.com/googleapis/python-bigtable/issues/788)) ([ecf539c](https://github.com/googleapis/python-bigtable/commit/ecf539c4c976fd9e5505b8abf0b697b218f09fef)) + ## [2.18.1](https://github.com/googleapis/python-bigtable/compare/v2.18.0...v2.18.1) (2023-05-11) diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py index e1b4da1de..0f1a446f3 100644 --- a/google/cloud/bigtable/gapic_version.py +++ b/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.1" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py index e1b4da1de..0f1a446f3 100644 --- a/google/cloud/bigtable_admin/gapic_version.py +++ b/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.1" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py index e1b4da1de..0f1a446f3 100644 --- a/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.1" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py index e1b4da1de..0f1a446f3 100644 --- a/google/cloud/bigtable_v2/gapic_version.py +++ b/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.18.1" # {x-release-please-version} +__version__ = "2.19.0" # {x-release-please-version} From eb55460b18a40ec1cde298593a0ad49ec45f7ef3 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 28 Jun 2023 03:44:12 -0400 Subject: [PATCH 20/52] chore: remove pinned Sphinx version [autoapprove] (#818) Source-Link: https://github.com/googleapis/synthtool/commit/909573ce9da2819eeb835909c795d29aea5c724e Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:ddf4551385d566771dc713090feb7b4c1164fb8a698fe52bbe7670b24236565b Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- noxfile.py | 3 +-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 02a4dedce..1b3cb6c52 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:240b5bcc2bafd450912d2da2be15e62bc6de2cf839823ae4bf94d4f392b451dc -# created: 2023-06-03T21:25:37.968717478Z + digest: sha256:ddf4551385d566771dc713090feb7b4c1164fb8a698fe52bbe7670b24236565b +# created: 2023-06-27T13:04:21.96690344Z diff --git a/noxfile.py b/noxfile.py index 18f489e19..a33d64638 100644 --- a/noxfile.py +++ b/noxfile.py @@ -345,10 +345,9 @@ def docfx(session): session.install("-e", ".") session.install( - "sphinx==4.0.1", + "gcp-sphinx-docfx-yaml", "alabaster", "recommonmark", - "gcp-sphinx-docfx-yaml", ) shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) From 62dd21798a5c646fb5628e47d484cfb8a594de50 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 29 Jun 2023 12:26:20 -0400 Subject: [PATCH 21/52] chore: store artifacts in placer (#822) Source-Link: https://github.com/googleapis/synthtool/commit/cb960373d12d20f8dc38beee2bf884d49627165e Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:2d816f26f728ac8b24248741e7d4c461c09764ef9f7be3684d557c9632e46dbd Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .kokoro/release/common.cfg | 9 +++++++++ noxfile.py | 2 +- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 1b3cb6c52..98994f474 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:ddf4551385d566771dc713090feb7b4c1164fb8a698fe52bbe7670b24236565b -# created: 2023-06-27T13:04:21.96690344Z + digest: sha256:2d816f26f728ac8b24248741e7d4c461c09764ef9f7be3684d557c9632e46dbd +# created: 2023-06-28T17:03:33.371210701Z diff --git a/.kokoro/release/common.cfg b/.kokoro/release/common.cfg index 8477e4ca6..2a8fd970c 100644 --- a/.kokoro/release/common.cfg +++ b/.kokoro/release/common.cfg @@ -38,3 +38,12 @@ env_vars: { key: "SECRET_MANAGER_KEYS" value: "releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" } + +# Store the packages we uploaded to PyPI. That way, we have a record of exactly +# what we published, which we can use to generate SBOMs and attestations. 
+action { + define_artifacts { + regex: "github/python-bigtable/**/*.tar.gz" + strip_prefix: "github/python-bigtable" + } +} diff --git a/noxfile.py b/noxfile.py index a33d64638..fe9f07b4f 100644 --- a/noxfile.py +++ b/noxfile.py @@ -419,6 +419,7 @@ def prerelease_deps(session): "grpcio!=1.52.0rc1", "grpcio-status", "google-api-core", + "google-auth", "proto-plus", "google-cloud-testutils", # dependencies of google-cloud-testutils" @@ -431,7 +432,6 @@ def prerelease_deps(session): # Remaining dependencies other_deps = [ "requests", - "google-auth", ] session.install(*other_deps) From d5720f8f5b5a81572f31d40051b3ec0f1d104304 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 4 Jul 2023 08:37:40 -0400 Subject: [PATCH 22/52] feat: Increase the maximum retention period for a Cloud Bigtable backup from 30 days to 90 days (#817) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add experimental reverse scan for public preview PiperOrigin-RevId: 543539118 Source-Link: https://github.com/googleapis/googleapis/commit/ae187063e3d8a43d85edb9b3084413d568ce7945 Source-Link: https://github.com/googleapis/googleapis-gen/commit/5d05516f84e53aaba63a4b8767ff955ac5bb4a87 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNWQwNTUxNmY4NGU1M2FhYmE2M2E0Yjg3NjdmZjk1NWFjNWJiNGE4NyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * feat: Increase the maximum retention period for a Cloud Bigtable backup from 30 days to 90 days PiperOrigin-RevId: 544356969 Source-Link: https://github.com/googleapis/googleapis/commit/c35889a0b917e22e26c53acafa5c27102a51d623 Source-Link: https://github.com/googleapis/googleapis-gen/commit/c00326ec78565b5d16f92c845ff0bb18f11ca05d Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYzAwMzI2ZWM3ODU2NWI1ZDE2ZjkyYzg0NWZmMGJiMThmMTFjYTA1ZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/cloud/bigtable_admin_v2/types/table.py | 2 +- google/cloud/bigtable_v2/types/bigtable.py | 12 ++++++++++++ google/cloud/bigtable_v2/types/feature_flags.py | 9 +++++++++ scripts/fixup_bigtable_v2_keywords.py | 2 +- 4 files changed, 23 insertions(+), 2 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index 16d136e16..c7925460d 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -590,7 +590,7 @@ class Backup(proto.Message): expire_time (google.protobuf.timestamp_pb2.Timestamp): Required. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and - at most 30 days from the time the request is received. Once + at most 90 days from the time the request is received. Once the ``expire_time`` has passed, Cloud Bigtable will delete the backup and free the resources used by the backup. 
        start_time (google.protobuf.timestamp_pb2.Timestamp):
diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py
index 13f6ac0db..c47e79d77 100644
--- a/google/cloud/bigtable_v2/types/bigtable.py
+++ b/google/cloud/bigtable_v2/types/bigtable.py
@@ -80,6 +80,14 @@ class ReadRowsRequest(proto.Message):
         request_stats_view (google.cloud.bigtable_v2.types.ReadRowsRequest.RequestStatsView):
             The view into RequestStats, as described
             above.
+        reversed_ (bool):
+            Experimental API - Please note that this API is currently
+            experimental and can change in the future.
+
+            Return rows in lexicographical descending order of the row
+            keys. The row contents will not be affected by this flag.
+            Example result set: [ {key: "k2", "f:col1": "v1", "f:col2":
+            "v1"}, {key: "k1", "f:col1": "v2", "f:col2": "v2"} ].
     """
 
     class RequestStatsView(proto.Enum):
@@ -131,6 +139,10 @@ class RequestStatsView(proto.Enum):
         number=6,
         enum=RequestStatsView,
     )
+    reversed_: bool = proto.Field(
+        proto.BOOL,
+        number=7,
+    )
 
 
 class ReadRowsResponse(proto.Message):
diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py
index 1b5f76e24..073269fbb 100644
--- a/google/cloud/bigtable_v2/types/feature_flags.py
+++ b/google/cloud/bigtable_v2/types/feature_flags.py
@@ -39,12 +39,21 @@ class FeatureFlags(proto.Message):
     endusers directly.
 
     Attributes:
+        reverse_scans (bool):
+            Notify the server that the client supports
+            reverse scans. The server will reject
+            ReadRowsRequests with the reverse bit set when
+            this is absent.
         mutate_rows_rate_limit (bool):
             Notify the server that the client enables
             batch write flow control by requesting
             RateLimitInfo from MutateRowsResponse.
     """
 
+    reverse_scans: bool = proto.Field(
+        proto.BOOL,
+        number=1,
+    )
    mutate_rows_rate_limit: bool = proto.Field(
        proto.BOOL,
        number=3,
diff --git a/scripts/fixup_bigtable_v2_keywords.py b/scripts/fixup_bigtable_v2_keywords.py
index 11ffed53f..3a833f591 100644
--- a/scripts/fixup_bigtable_v2_keywords.py
+++ b/scripts/fixup_bigtable_v2_keywords.py
@@ -46,7 +46,7 @@ class bigtableCallTransformer(cst.CSTTransformer):
     'ping_and_warm': ('name', 'app_profile_id', ),
     'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ),
     'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ),
-    'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', ),
+    'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed_', ),
     'sample_row_keys': ('table_name', 'app_profile_id', ),
     }

From 475a16072f3ad41357bdb765fff608a39141ec00 Mon Sep 17 00:00:00 2001
From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com>
Date: Tue, 4 Jul 2023 15:21:01 -0400
Subject: [PATCH 23/52] fix: Add async context manager return types (#828)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* fix: Add async context manager return types

chore: Mock return_value should not populate oneof message fields

chore: Support snippet generation for services that only support REST transport

chore: Update gapic-generator-python to v1.11.0

PiperOrigin-RevId: 545430278

Source-Link: https://github.com/googleapis/googleapis/commit/601b5326107eeb74800b426d1f9933faa233258a

Source-Link: https://github.com/googleapis/googleapis-gen/commit/b3f18d0f6560a855022fd058865e7620479d7af9
Copy-Tag: 
eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYjNmMThkMGY2NTYwYTg1NTAyMmZkMDU4ODY1ZTc2MjA0NzlkN2FmOSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 2 +- .../bigtable_table_admin/async_client.py | 2 +- .../services/bigtable/async_client.py | 2 +- google/cloud/bigtable_v2/types/bigtable.py | 4 +-- scripts/fixup_bigtable_v2_keywords.py | 2 +- .../test_bigtable_instance_admin.py | 34 +++++-------------- .../test_bigtable_table_admin.py | 18 ++++++---- tests/unit/gapic/bigtable_v2/test_bigtable.py | 6 +--- 8 files changed, 27 insertions(+), 43 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 12811bcea..af4ceaf02 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -2355,7 +2355,7 @@ async def list_hot_tablets( # Done; return the response. return response - async def __aenter__(self): + async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient": return self async def __aexit__(self, exc_type, exc, tb): diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 1663c16eb..995b8d150 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2671,7 +2671,7 @@ async def test_iam_permissions( # Done; return the response. return response - async def __aenter__(self): + async def __aenter__(self) -> "BigtableTableAdminAsyncClient": return self async def __aexit__(self, exc_type, exc, tb): diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index abd82d4d8..06e5852a7 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1210,7 +1210,7 @@ def read_change_stream( # Done; return the response. return response - async def __aenter__(self): + async def __aenter__(self) -> "BigtableAsyncClient": return self async def __aexit__(self, exc_type, exc, tb): diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py index c47e79d77..742606553 100644 --- a/google/cloud/bigtable_v2/types/bigtable.py +++ b/google/cloud/bigtable_v2/types/bigtable.py @@ -80,7 +80,7 @@ class ReadRowsRequest(proto.Message): request_stats_view (google.cloud.bigtable_v2.types.ReadRowsRequest.RequestStatsView): The view into RequestStats, as described above. - reversed_ (bool): + reversed (bool): Experimental API - Please note that this API is currently experimental and can change in the future. 
@@ -139,7 +139,7 @@ class RequestStatsView(proto.Enum): number=6, enum=RequestStatsView, ) - reversed_: bool = proto.Field( + reversed: bool = proto.Field( proto.BOOL, number=7, ) diff --git a/scripts/fixup_bigtable_v2_keywords.py b/scripts/fixup_bigtable_v2_keywords.py index 3a833f591..dcfe1ab1c 100644 --- a/scripts/fixup_bigtable_v2_keywords.py +++ b/scripts/fixup_bigtable_v2_keywords.py @@ -46,7 +46,7 @@ class bigtableCallTransformer(cst.CSTTransformer): 'ping_and_warm': ('name', 'app_profile_id', ), 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ), 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), - 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed_', ), + 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ), 'sample_row_keys': ('table_name', 'app_profile_id', ), } diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 76715f1ed..2cc636bbd 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -2425,11 +2425,6 @@ def test_get_cluster(request_type, transport: str = "grpc"): state=instance.Cluster.State.READY, serve_nodes=1181, default_storage_type=common.StorageType.SSD, - cluster_config=instance.Cluster.ClusterConfig( - cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig( - autoscaling_limits=instance.AutoscalingLimits(min_serve_nodes=1600) - ) - ), ) response = client.get_cluster(request) @@ -3529,9 +3524,6 @@ def test_create_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( - cluster_ids=["cluster_ids_value"] - ), ) response = client.create_app_profile(request) @@ -3801,9 +3793,6 @@ def test_get_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( - cluster_ids=["cluster_ids_value"] - ), ) response = client.get_app_profile(request) @@ -4456,9 +4445,11 @@ async def test_list_app_profiles_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_app_profiles(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6138,9 +6129,11 @@ async def test_list_hot_tablets_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_hot_tablets(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ 
-8088,11 +8081,6 @@ def test_get_cluster_rest(request_type): state=instance.Cluster.State.READY, serve_nodes=1181, default_storage_type=common.StorageType.SSD, - cluster_config=instance.Cluster.ClusterConfig( - cluster_autoscaling_config=instance.Cluster.ClusterAutoscalingConfig( - autoscaling_limits=instance.AutoscalingLimits(min_serve_nodes=1600) - ) - ), ) # Wrap the value into a proper Response obj @@ -9345,9 +9333,6 @@ def test_create_app_profile_rest(request_type): name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( - cluster_ids=["cluster_ids_value"] - ), ) # Wrap the value into a proper Response obj @@ -9670,9 +9655,6 @@ def test_get_app_profile_rest(request_type): name="name_value", etag="etag_value", description="description_value", - multi_cluster_routing_use_any=instance.AppProfile.MultiClusterRoutingUseAny( - cluster_ids=["cluster_ids_value"] - ), ) # Wrap the value into a proper Response obj diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 8498e4fa5..21cd1e5b3 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1691,9 +1691,11 @@ async def test_list_tables_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_tables(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -4457,9 +4459,11 @@ async def test_list_snapshots_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_snapshots(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token @@ -6058,9 +6062,11 @@ async def test_list_backups_async_pages(): RuntimeError, ) pages = [] - async for page_ in ( + # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` + # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 + async for page_ in ( # pragma: no branch await client.list_backups(request={}) - ).pages: # pragma: no branch + ).pages: pages.append(page_) for page_, token in zip(pages, ["abc", "def", "ghi", ""]): assert page_.raw_page.next_page_token == token diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py index 03ba3044f..5b4c27f63 100644 --- a/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -5316,11 +5316,7 @@ def test_read_change_stream_rest(request_type): # Mock the http request call within the method and fake a response. with mock.patch.object(type(client.transport._session), "request") as req: # Designate an appropriate value for the returned response. 
- return_value = bigtable.ReadChangeStreamResponse( - data_change=bigtable.ReadChangeStreamResponse.DataChange( - type_=bigtable.ReadChangeStreamResponse.DataChange.Type.USER - ), - ) + return_value = bigtable.ReadChangeStreamResponse() # Wrap the value into a proper Response obj response_value = Response() From 275fb33cbf9958b6455fd32c9fa29b2a80379f01 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 09:06:42 -0400 Subject: [PATCH 24/52] chore: Update gapic-generator-python to v1.11.2 (#829) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.11.2 PiperOrigin-RevId: 546510849 Source-Link: https://github.com/googleapis/googleapis/commit/736073ad9a9763a170eceaaa54519bcc0ea55a5e Source-Link: https://github.com/googleapis/googleapis-gen/commit/deb64e8ec19d141e31089fe932b3a997ad541c4d Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZGViNjRlOGVjMTlkMTQxZTMxMDg5ZmU5MzJiM2E5OTdhZDU0MWM0ZCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/cloud/bigtable_admin/__init__.py | 2 +- google/cloud/bigtable_admin_v2/__init__.py | 2 +- google/cloud/bigtable_admin_v2/services/__init__.py | 2 +- .../services/bigtable_instance_admin/__init__.py | 2 +- .../services/bigtable_instance_admin/async_client.py | 2 +- .../services/bigtable_instance_admin/client.py | 2 +- .../services/bigtable_instance_admin/pagers.py | 2 +- .../services/bigtable_instance_admin/transports/__init__.py | 2 +- .../services/bigtable_instance_admin/transports/base.py | 2 +- .../services/bigtable_instance_admin/transports/grpc.py | 2 +- .../services/bigtable_instance_admin/transports/grpc_asyncio.py | 2 +- .../services/bigtable_instance_admin/transports/rest.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/__init__.py | 2 +- .../services/bigtable_table_admin/async_client.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/client.py | 2 +- .../bigtable_admin_v2/services/bigtable_table_admin/pagers.py | 2 +- .../services/bigtable_table_admin/transports/__init__.py | 2 +- .../services/bigtable_table_admin/transports/base.py | 2 +- .../services/bigtable_table_admin/transports/grpc.py | 2 +- .../services/bigtable_table_admin/transports/grpc_asyncio.py | 2 +- .../services/bigtable_table_admin/transports/rest.py | 2 +- google/cloud/bigtable_admin_v2/types/__init__.py | 2 +- google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py | 2 +- google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py | 2 +- google/cloud/bigtable_admin_v2/types/common.py | 2 +- google/cloud/bigtable_admin_v2/types/instance.py | 2 +- google/cloud/bigtable_admin_v2/types/table.py | 2 +- google/cloud/bigtable_v2/__init__.py | 2 +- google/cloud/bigtable_v2/services/__init__.py | 2 +- google/cloud/bigtable_v2/services/bigtable/__init__.py | 2 +- google/cloud/bigtable_v2/services/bigtable/async_client.py | 2 +- google/cloud/bigtable_v2/services/bigtable/client.py | 2 +- .../cloud/bigtable_v2/services/bigtable/transports/__init__.py | 2 +- google/cloud/bigtable_v2/services/bigtable/transports/base.py | 2 +- google/cloud/bigtable_v2/services/bigtable/transports/grpc.py | 2 +- .../bigtable_v2/services/bigtable/transports/grpc_asyncio.py | 2 +- google/cloud/bigtable_v2/services/bigtable/transports/rest.py | 2 +- google/cloud/bigtable_v2/types/__init__.py | 2 +- 
google/cloud/bigtable_v2/types/bigtable.py | 2 +- google/cloud/bigtable_v2/types/data.py | 2 +- google/cloud/bigtable_v2/types/feature_flags.py | 2 +- google/cloud/bigtable_v2/types/request_stats.py | 2 +- google/cloud/bigtable_v2/types/response_params.py | 2 +- scripts/fixup_bigtable_admin_v2_keywords.py | 2 +- scripts/fixup_bigtable_v2_keywords.py | 2 +- tests/__init__.py | 2 +- tests/unit/__init__.py | 2 +- tests/unit/gapic/__init__.py | 2 +- tests/unit/gapic/bigtable_admin_v2/__init__.py | 2 +- .../gapic/bigtable_admin_v2/test_bigtable_instance_admin.py | 2 +- tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py | 2 +- tests/unit/gapic/bigtable_v2/__init__.py | 2 +- tests/unit/gapic/bigtable_v2/test_bigtable.py | 2 +- 53 files changed, 53 insertions(+), 53 deletions(-) diff --git a/google/cloud/bigtable_admin/__init__.py b/google/cloud/bigtable_admin/__init__.py index 0ba93ec63..43535ae20 100644 --- a/google/cloud/bigtable_admin/__init__.py +++ b/google/cloud/bigtable_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index c030ec1bd..8033a0af7 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/__init__.py b/google/cloud/bigtable_admin_v2/services/__init__.py index e8e1c3845..89a37dc92 100644 --- a/google/cloud/bigtable_admin_v2/services/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py index 1fb10736e..40631d1b4 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index af4ceaf02..111b8cbf7 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index ecc9bf1e2..33c8510b9 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index bfcbbf23d..0d646a96e 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py index e5637c0da..62da28c88 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index bd45f319f..d92d25453 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index f037f5a44..eca37957d 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 82b03b0bb..145aa427d 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index e9b94cf78..228f5c02c 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py index 515696537..544649e90 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 995b8d150..02dd0153d 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index e043aa224..c83f3116f 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index e639227df..331647b4c 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py index 585b4e437..be4aa8d2a 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index cade1335b..591c6bcfe 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index f8cf9f834..e18be126c 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 54eb7e524..8f72e3fe8 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 4d5b2ed1c..817916977 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py index 69153c9fc..c69e3129b 100644 --- a/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/google/cloud/bigtable_admin_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py index a22543354..87332a351 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 4c4b9e9e2..dfa815dc2 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/types/common.py b/google/cloud/bigtable_admin_v2/types/common.py index 2cc71fc43..959b9deb1 100644 --- a/google/cloud/bigtable_admin_v2/types/common.py +++ b/google/cloud/bigtable_admin_v2/types/common.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py index 2b5d81636..6ae9159d0 100644 --- a/google/cloud/bigtable_admin_v2/types/instance.py +++ b/google/cloud/bigtable_admin_v2/types/instance.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index c7925460d..1dd0ff0b2 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/__init__.py b/google/cloud/bigtable_v2/__init__.py index ee3bd8c0c..80bd4ec09 100644 --- a/google/cloud/bigtable_v2/__init__.py +++ b/google/cloud/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/__init__.py b/google/cloud/bigtable_v2/services/__init__.py index e8e1c3845..89a37dc92 100644 --- a/google/cloud/bigtable_v2/services/__init__.py +++ b/google/cloud/bigtable_v2/services/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/bigtable/__init__.py b/google/cloud/bigtable_v2/services/bigtable/__init__.py index cfce7b6b8..f10a68e5b 100644 --- a/google/cloud/bigtable_v2/services/bigtable/__init__.py +++ b/google/cloud/bigtable_v2/services/bigtable/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 06e5852a7..07a782d0c 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index a778aff3c..db393faa7 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py index 1b03919f6..c09443bc2 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/google/cloud/bigtable_v2/services/bigtable/transports/base.py index 5b4580c18..b580bbca7 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index b9e073e8a..8ba04e761 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 8bf02ce77..2c0cbdad6 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 4343fbb90..31d230f94 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/types/__init__.py b/google/cloud/bigtable_v2/types/__init__.py index 9f15efaf5..f266becb9 100644 --- a/google/cloud/bigtable_v2/types/__init__.py +++ b/google/cloud/bigtable_v2/types/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py index 742606553..378f95480 100644 --- a/google/cloud/bigtable_v2/types/bigtable.py +++ b/google/cloud/bigtable_v2/types/bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py index 515e167df..3fad8f87c 100644 --- a/google/cloud/bigtable_v2/types/data.py +++ b/google/cloud/bigtable_v2/types/data.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py index 073269fbb..d1fd03ff6 100644 --- a/google/cloud/bigtable_v2/types/feature_flags.py +++ b/google/cloud/bigtable_v2/types/feature_flags.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/types/request_stats.py b/google/cloud/bigtable_v2/types/request_stats.py index d72ba8694..27c2bb028 100644 --- a/google/cloud/bigtable_v2/types/request_stats.py +++ b/google/cloud/bigtable_v2/types/request_stats.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/google/cloud/bigtable_v2/types/response_params.py b/google/cloud/bigtable_v2/types/response_params.py index 2532e64e2..98e3a67db 100644 --- a/google/cloud/bigtable_v2/types/response_params.py +++ b/google/cloud/bigtable_v2/types/response_params.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/fixup_bigtable_admin_v2_keywords.py b/scripts/fixup_bigtable_admin_v2_keywords.py index 17be56f2f..58eab8bcf 100644 --- a/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/scripts/fixup_bigtable_admin_v2_keywords.py @@ -1,6 +1,6 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/fixup_bigtable_v2_keywords.py b/scripts/fixup_bigtable_v2_keywords.py index dcfe1ab1c..8d32e5b70 100644 --- a/scripts/fixup_bigtable_v2_keywords.py +++ b/scripts/fixup_bigtable_v2_keywords.py @@ -1,6 +1,6 @@ #! /usr/bin/env python3 # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/__init__.py b/tests/__init__.py index e8e1c3845..89a37dc92 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py index e8e1c3845..89a37dc92 100644 --- a/tests/unit/__init__.py +++ b/tests/unit/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/__init__.py b/tests/unit/gapic/__init__.py index e8e1c3845..89a37dc92 100644 --- a/tests/unit/gapic/__init__.py +++ b/tests/unit/gapic/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/bigtable_admin_v2/__init__.py b/tests/unit/gapic/bigtable_admin_v2/__init__.py index e8e1c3845..89a37dc92 100644 --- a/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ b/tests/unit/gapic/bigtable_admin_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 2cc636bbd..f5f5fc514 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 21cd1e5b3..0a0b3b671 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/bigtable_v2/__init__.py b/tests/unit/gapic/bigtable_v2/__init__.py index e8e1c3845..89a37dc92 100644 --- a/tests/unit/gapic/bigtable_v2/__init__.py +++ b/tests/unit/gapic/bigtable_v2/__init__.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py index 5b4c27f63..597540d69 100644 --- a/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
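Taken together, the reversed-scan field from PATCH 22/52 and the typed async context manager from PATCH 23/52 can be exercised with a short sketch like the one below. It is illustrative only, not code from these patches: the project, instance, and table names are placeholders, and it assumes application-default credentials plus server-side support for the (still experimental) reverse scans.

    import asyncio

    from google.cloud.bigtable_v2 import BigtableAsyncClient, ReadRowsRequest

    async def main() -> None:
        # PATCH 23/52 annotates __aenter__ -> "BigtableAsyncClient", so
        # `client` is inferred as the concrete async client type here.
        async with BigtableAsyncClient() as client:
            request = ReadRowsRequest(
                # Placeholder resource name for illustration.
                table_name="projects/my-project/instances/my-instance/tables/my-table",
                rows_limit=10,
                # Added in PATCH 22/52 as `reversed_`, renamed to `reversed`
                # in PATCH 23/52: return rows in descending row-key order.
                reversed=True,
            )
            stream = await client.read_rows(request=request)
            async for response in stream:
                print(response)

    asyncio.run(main())
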
From 4778d928488b760d5dba7d8b040ecbc33bf0ae2a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 10 Jul 2023 16:40:12 -0400 Subject: [PATCH 25/52] chore: Update gapic-generator-python to v1.11.3 (#830) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.11.3 PiperOrigin-RevId: 546899192 Source-Link: https://github.com/googleapis/googleapis/commit/e6b16918b98fe1a35f725b56537354f22b6cdc48 Source-Link: https://github.com/googleapis/googleapis-gen/commit/0b3917c421cbda7fcb67092e16c33f3ea46f4bc7 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMGIzOTE3YzQyMWNiZGE3ZmNiNjcwOTJlMTZjMzNmM2VhNDZmNGJjNyJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 12 ++++----- .../bigtable_instance_admin/client.py | 12 ++++----- .../bigtable_table_admin/async_client.py | 12 ++++----- .../services/bigtable_table_admin/client.py | 12 ++++----- .../cloud/bigtable_admin_v2/types/instance.py | 4 +-- google/cloud/bigtable_admin_v2/types/table.py | 4 +-- .../services/bigtable/async_client.py | 25 +++++++++++-------- .../bigtable_v2/services/bigtable/client.py | 25 +++++++++++-------- .../services/bigtable/transports/grpc.py | 7 +++--- .../bigtable/transports/grpc_asyncio.py | 7 +++--- .../services/bigtable/transports/rest.py | 18 +++++++------ google/cloud/bigtable_v2/types/bigtable.py | 12 ++++++--- google/cloud/bigtable_v2/types/data.py | 11 ++++---- 13 files changed, 88 insertions(+), 73 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 111b8cbf7..4b45774f0 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1883,8 +1883,8 @@ async def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the + policy is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2032,8 +2032,8 @@ async def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the + policy is being specified. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2172,8 +2172,8 @@ async def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the + policy detail is being requested. + See the operation documentation for the appropriate value for this field. 
This corresponds to the ``resource`` field diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 33c8510b9..fb993b651 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -2107,8 +2107,8 @@ def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the + policy is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2243,8 +2243,8 @@ def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the + policy is being specified. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2380,8 +2380,8 @@ def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the + policy detail is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 02dd0153d..a7eed5d37 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2301,8 +2301,8 @@ async def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the + policy is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2450,8 +2450,8 @@ async def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the + policy is being specified. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2590,8 +2590,8 @@ async def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the + policy detail is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index c83f3116f..037464954 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2556,8 +2556,8 @@ def get_iam_policy( The request object. 
Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the + policy is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2692,8 +2692,8 @@ def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the + policy is being specified. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2829,8 +2829,8 @@ def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the + policy detail is being requested. + See the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py index 6ae9159d0..aa85eba19 100644 --- a/google/cloud/bigtable_admin_v2/types/instance.py +++ b/google/cloud/bigtable_admin_v2/types/instance.py @@ -117,8 +117,8 @@ class Type(proto.Enum): be set on the cluster. DEVELOPMENT (2): DEPRECATED: Prefer PRODUCTION for all use - cases, as it no longer enforces a higher minimum - node count than DEVELOPMENT. + cases, as it no longer enforces + a higher minimum node count than DEVELOPMENT. """ TYPE_UNSPECIFIED = 0 PRODUCTION = 1 diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index 1dd0ff0b2..609d165f9 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -143,8 +143,8 @@ class Table(proto.Message): deletion_protection (bool): Set to true to make the table protected against data loss. i.e. deleting the following - resources through Admin APIs are prohibited: - - The table. + resources through Admin APIs are prohibited: + - The table. - The column families in the table. - The instance containing the table. Note one can still delete the data stored in the diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 07a782d0c..441022ac7 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1030,8 +1030,8 @@ def generate_initial_change_stream_partitions( Args: request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for + by Apache Beam BigtableIO. + Request message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (:class:`str`): Required. The unique name of the table from which to get @@ -1061,8 +1061,8 @@ def generate_initial_change_stream_partitions( Returns: AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for + by Apache Beam BigtableIO. + Response message for Bigtable.GenerateInitialChangeStreamPartitions. 
""" @@ -1123,15 +1123,17 @@ def read_change_stream( metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that + are caused by garbage collection. Args: request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Request message for + Bigtable.ReadChangeStream. table_name (:class:`str`): Required. The unique name of the table from which to read a change stream. Values are of the form @@ -1160,8 +1162,9 @@ def read_change_stream( Returns: AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Response message for + Bigtable.ReadChangeStream. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index db393faa7..595310c88 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1330,8 +1330,8 @@ def generate_initial_change_stream_partitions( Args: request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for + by Apache Beam BigtableIO. + Request message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (str): Required. The unique name of the table from which to get @@ -1361,8 +1361,8 @@ def generate_initial_change_stream_partitions( Returns: Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for + by Apache Beam BigtableIO. + Response message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -1427,15 +1427,17 @@ def read_change_stream( metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.ReadChangeStreamResponse]: r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that + are caused by garbage collection. Args: request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Request message for + Bigtable.ReadChangeStream. table_name (str): Required. The unique name of the table from which to read a change stream. 
Values are of the form @@ -1464,8 +1466,9 @@ def read_change_stream( Returns: Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Response message for + Bigtable.ReadChangeStream. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 8ba04e761..0e0666242 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -481,9 +481,10 @@ def read_change_stream( r"""Return a callable for the read change stream method over gRPC. NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that + are caused by garbage collection. Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 2c0cbdad6..49259969b 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -490,9 +490,10 @@ def read_change_stream( r"""Return a callable for the read change stream method over gRPC. NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. + BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that + are caused by garbage collection. Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 31d230f94..066b35e2a 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -571,8 +571,8 @@ def __call__( Args: request (~.bigtable.GenerateInitialChangeStreamPartitionsRequest): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for + by Apache Beam BigtableIO. + Request message for Bigtable.GenerateInitialChangeStreamPartitions. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -583,8 +583,8 @@ def __call__( Returns: ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for + by Apache Beam BigtableIO. + Response message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -975,8 +975,9 @@ def __call__( Args: request (~.bigtable.ReadChangeStreamRequest): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Request message for + Bigtable.ReadChangeStream. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. @@ -986,8 +987,9 @@ def __call__( Returns: ~.bigtable.ReadChangeStreamResponse: NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. + Response message for + Bigtable.ReadChangeStream. """ diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py index 378f95480..ab5358194 100644 --- a/google/cloud/bigtable_v2/types/bigtable.py +++ b/google/cloud/bigtable_v2/types/bigtable.py @@ -749,7 +749,8 @@ class ReadModifyWriteRowResponse(proto.Message): class GenerateInitialChangeStreamPartitionsRequest(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Request message for + BigtableIO. + Request message for Bigtable.GenerateInitialChangeStreamPartitions. Attributes: @@ -777,7 +778,8 @@ class GenerateInitialChangeStreamPartitionsRequest(proto.Message): class GenerateInitialChangeStreamPartitionsResponse(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Response message for + BigtableIO. + Response message for Bigtable.GenerateInitialChangeStreamPartitions. Attributes: @@ -794,7 +796,8 @@ class GenerateInitialChangeStreamPartitionsResponse(proto.Message): class ReadChangeStreamRequest(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Request message for Bigtable.ReadChangeStream. + BigtableIO. + Request message for Bigtable.ReadChangeStream. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -890,7 +893,8 @@ class ReadChangeStreamRequest(proto.Message): class ReadChangeStreamResponse(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Response message for Bigtable.ReadChangeStream. + BigtableIO. + Response message for Bigtable.ReadChangeStream. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py index 3fad8f87c..9913d9ed0 100644 --- a/google/cloud/bigtable_v2/types/data.py +++ b/google/cloud/bigtable_v2/types/data.py @@ -639,10 +639,10 @@ class Chain(proto.Message): Attributes: filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): The elements of "filters" are chained - together to process the input row: in row -> - f(0) -> intermediate row -> f(1) -> ... -> f(N) - -> out row The full chain is executed - atomically. + together to process the input row: + in row -> f(0) -> intermediate row -> f(1) -> + ... -> f(N) -> out row The full chain is + executed atomically. """ filters: MutableSequence["RowFilter"] = proto.RepeatedField( @@ -1041,7 +1041,8 @@ class ReadModifyWriteRule(proto.Message): class StreamPartition(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. A partition of a change stream. + BigtableIO. + A partition of a change stream. 
Attributes: row_range (google.cloud.bigtable_v2.types.RowRange): From fddd0ba97155e112af92a98fd8f20e59b139d177 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 13 Jul 2023 11:21:06 -0400 Subject: [PATCH 26/52] docs: fix formatting for reversed order field example (#831) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: fix formatting for reversed order field example PiperOrigin-RevId: 547553954 Source-Link: https://github.com/googleapis/googleapis/commit/c4e6427fcefd1cd9a15a3008ae7ee8adca972276 Source-Link: https://github.com/googleapis/googleapis-gen/commit/f552269609d4183546543bfe3a022f544d4f5bdb Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZjU1MjI2OTYwOWQ0MTgzNTQ2NTQzYmZlM2EwMjJmNTQ0ZDRmNWJkYiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/cloud/bigtable_v2/types/bigtable.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py index ab5358194..42740eee2 100644 --- a/google/cloud/bigtable_v2/types/bigtable.py +++ b/google/cloud/bigtable_v2/types/bigtable.py @@ -86,8 +86,15 @@ class ReadRowsRequest(proto.Message): Return rows in lexiographical descending order of the row keys. The row contents will not be affected by this flag. - Example result set: [ {key: "k2", "f:col1": "v1", "f:col2": - "v1"}, {key: "k1", "f:col1": "v2", "f:col2": "v2"} ]. + + Example result set: + + :: + + [ + {key: "k2", "f:col1": "v1", "f:col2": "v1"}, + {key: "k1", "f:col1": "v2", "f:col2": "v2"} + ] """ class RequestStatsView(proto.Enum): From f146162ccdb1a642b215182c714a710849282a2a Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 14 Jul 2023 10:14:17 -0400 Subject: [PATCH 27/52] chore: Update gapic-generator-python to v1.11.4 (#832) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.11.4 PiperOrigin-RevId: 547897126 Source-Link: https://github.com/googleapis/googleapis/commit/c09c75e087d8f9a2d466b4aaad7dd2926b5ead5a Source-Link: https://github.com/googleapis/googleapis-gen/commit/45e0ec4343517cd0aa66b5ca64232a1802c2f945 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNDVlMGVjNDM0MzUxN2NkMGFhNjZiNWNhNjQyMzJhMTgwMmMyZjk0NSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/cloud/bigtable_admin_v2/types/table.py | 1 + google/cloud/bigtable_v2/types/data.py | 1 + 2 files changed, 2 insertions(+) diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index 609d165f9..e75ac00bb 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -144,6 +144,7 @@ class Table(proto.Message): Set to true to make the table protected against data loss. i.e. deleting the following resources through Admin APIs are prohibited: + - The table. - The column families in the table. - The instance containing the table. 
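The deletion_protection documentation above means the table, its column families, and the containing instance all refuse Admin API deletes while the flag is set. A minimal sketch of enabling it at table-creation time, assuming the admin client from this repo and hypothetical project/instance/table names:

    from google.cloud import bigtable_admin_v2

    admin = bigtable_admin_v2.BigtableTableAdminClient()

    # Hypothetical resource names; substitute your own.
    parent = "projects/my-project/instances/my-instance"

    table = admin.create_table(
        request={
            "parent": parent,
            "table_id": "protected-table",
            "table": {"deletion_protection": True},
        }
    )
    # Deleting this table (or its column families, or the instance)
    # through the Admin APIs now fails until deletion_protection is
    # flipped back to False.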
diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py index 9913d9ed0..0e9e0bfe5 100644 --- a/google/cloud/bigtable_v2/types/data.py +++ b/google/cloud/bigtable_v2/types/data.py @@ -640,6 +640,7 @@ class Chain(proto.Message): filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): The elements of "filters" are chained together to process the input row: + in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row The full chain is executed atomically. From 7f747e10d5c4182d95327321872b4d78519de818 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 17 Jul 2023 11:52:31 -0400 Subject: [PATCH 28/52] build(deps): [autoapprove] bump cryptography from 41.0.0 to 41.0.2 (#834) Source-Link: https://github.com/googleapis/synthtool/commit/d6103f4a3540ba60f633a9e25c37ec5fe7e6286d Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:39f0f3f2be02ef036e297e376fe3b6256775576da8a6ccb1d5eeb80f4c8bf8fb Co-authored-by: Owl Bot --- .flake8 | 2 +- .github/.OwlBot.lock.yaml | 4 +-- .github/auto-label.yaml | 2 +- .kokoro/build.sh | 2 +- .kokoro/docker/docs/Dockerfile | 2 +- .kokoro/populate-secrets.sh | 2 +- .kokoro/publish-docs.sh | 2 +- .kokoro/release.sh | 2 +- .kokoro/requirements.txt | 44 +++++++++++++++------------- .kokoro/test-samples-against-head.sh | 2 +- .kokoro/test-samples-impl.sh | 2 +- .kokoro/test-samples.sh | 2 +- .kokoro/trampoline.sh | 2 +- .kokoro/trampoline_v2.sh | 2 +- .pre-commit-config.yaml | 2 +- .trampolinerc | 4 +-- MANIFEST.in | 2 +- docs/conf.py | 2 +- noxfile.py | 3 +- scripts/decrypt-secrets.sh | 2 +- scripts/readme-gen/readme_gen.py | 18 ++++++------ setup.cfg | 2 +- 22 files changed, 55 insertions(+), 52 deletions(-) diff --git a/.flake8 b/.flake8 index 2e4387498..87f6e408c 100644 --- a/.flake8 +++ b/.flake8 @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 98994f474..ae4a522b9 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:2d816f26f728ac8b24248741e7d4c461c09764ef9f7be3684d557c9632e46dbd -# created: 2023-06-28T17:03:33.371210701Z + digest: sha256:39f0f3f2be02ef036e297e376fe3b6256775576da8a6ccb1d5eeb80f4c8bf8fb +# created: 2023-07-17T15:20:13.819193964Z diff --git a/.github/auto-label.yaml b/.github/auto-label.yaml index 41bff0b53..b2016d119 100644 --- a/.github/auto-label.yaml +++ b/.github/auto-label.yaml @@ -1,4 +1,4 @@ -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/build.sh b/.kokoro/build.sh index 2ab1155b2..dec6b66a7 100755 --- a/.kokoro/build.sh +++ b/.kokoro/build.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2018 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
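The RowFilter.Chain semantics restated in the data.py hunk above (in row -> f(0) -> intermediate row -> f(1) -> ... -> f(N) -> out row, executed atomically) amount to function composition over a single row. A rough sketch using the bigtable_v2 types, with two illustrative sub-filters chosen for the example:

    from google.cloud.bigtable_v2.types import RowFilter

    # f(0): keep only cells in column family "stats";
    # f(1): keep at most one cell per column.
    chain = RowFilter(
        chain=RowFilter.Chain(
            filters=[
                RowFilter(family_name_regex_filter="stats"),
                RowFilter(cells_per_column_limit_filter=1),
            ]
        )
    )
    # The input row flows through f(0), then its output through f(1);
    # the whole chain is applied atomically to produce the output row.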
diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile index f8137d0ae..8e39a2cc4 100644 --- a/.kokoro/docker/docs/Dockerfile +++ b/.kokoro/docker/docs/Dockerfile @@ -1,4 +1,4 @@ -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh index f52514257..6f3972140 100755 --- a/.kokoro/populate-secrets.sh +++ b/.kokoro/populate-secrets.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC. +# Copyright 2023 Google LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/publish-docs.sh b/.kokoro/publish-docs.sh index 1c4d62370..9eafe0be3 100755 --- a/.kokoro/publish-docs.sh +++ b/.kokoro/publish-docs.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/release.sh b/.kokoro/release.sh index 6b594c813..2e1cbfa81 100755 --- a/.kokoro/release.sh +++ b/.kokoro/release.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index c7929db6d..67d70a110 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -113,26 +113,30 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==41.0.0 \ - --hash=sha256:0ddaee209d1cf1f180f1efa338a68c4621154de0afaef92b89486f5f96047c55 \ - --hash=sha256:14754bcdae909d66ff24b7b5f166d69340ccc6cb15731670435efd5719294895 \ - --hash=sha256:344c6de9f8bda3c425b3a41b319522ba3208551b70c2ae00099c205f0d9fd3be \ - --hash=sha256:34d405ea69a8b34566ba3dfb0521379b210ea5d560fafedf9f800a9a94a41928 \ - --hash=sha256:3680248309d340fda9611498a5319b0193a8dbdb73586a1acf8109d06f25b92d \ - --hash=sha256:3c5ef25d060c80d6d9f7f9892e1d41bb1c79b78ce74805b8cb4aa373cb7d5ec8 \ - --hash=sha256:4ab14d567f7bbe7f1cdff1c53d5324ed4d3fc8bd17c481b395db224fb405c237 \ - --hash=sha256:5c1f7293c31ebc72163a9a0df246f890d65f66b4a40d9ec80081969ba8c78cc9 \ - --hash=sha256:6b71f64beeea341c9b4f963b48ee3b62d62d57ba93eb120e1196b31dc1025e78 \ - --hash=sha256:7d92f0248d38faa411d17f4107fc0bce0c42cae0b0ba5415505df72d751bf62d \ - --hash=sha256:8362565b3835ceacf4dc8f3b56471a2289cf51ac80946f9087e66dc283a810e0 \ - --hash=sha256:84a165379cb9d411d58ed739e4af3396e544eac190805a54ba2e0322feb55c46 \ - --hash=sha256:88ff107f211ea696455ea8d911389f6d2b276aabf3231bf72c8853d22db755c5 \ - --hash=sha256:9f65e842cb02550fac96536edb1d17f24c0a338fd84eaf582be25926e993dde4 \ - --hash=sha256:a4fc68d1c5b951cfb72dfd54702afdbbf0fb7acdc9b7dc4301bbf2225a27714d \ - --hash=sha256:b7f2f5c525a642cecad24ee8670443ba27ac1fab81bba4cc24c7b6b41f2d0c75 \ - --hash=sha256:b846d59a8d5a9ba87e2c3d757ca019fa576793e8758174d3868aecb88d6fc8eb \ - --hash=sha256:bf8fc66012ca857d62f6a347007e166ed59c0bc150cefa49f28376ebe7d992a2 \ - --hash=sha256:f5d0bf9b252f30a31664b6f64432b4730bb7038339bd18b1fafe129cfc2be9be +cryptography==41.0.2 \ + 
--hash=sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711 \ + --hash=sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7 \ + --hash=sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd \ + --hash=sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e \ + --hash=sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58 \ + --hash=sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0 \ + --hash=sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d \ + --hash=sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83 \ + --hash=sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831 \ + --hash=sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766 \ + --hash=sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b \ + --hash=sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c \ + --hash=sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182 \ + --hash=sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f \ + --hash=sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa \ + --hash=sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4 \ + --hash=sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a \ + --hash=sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2 \ + --hash=sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76 \ + --hash=sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5 \ + --hash=sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee \ + --hash=sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f \ + --hash=sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14 # via # gcp-releasetool # secretstorage diff --git a/.kokoro/test-samples-against-head.sh b/.kokoro/test-samples-against-head.sh index ba3a707b0..63ac41dfa 100755 --- a/.kokoro/test-samples-against-head.sh +++ b/.kokoro/test-samples-against-head.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/test-samples-impl.sh b/.kokoro/test-samples-impl.sh index 2c6500cae..5a0f5fab6 100755 --- a/.kokoro/test-samples-impl.sh +++ b/.kokoro/test-samples-impl.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/test-samples.sh b/.kokoro/test-samples.sh index 11c042d34..50b35a48c 100755 --- a/.kokoro/test-samples.sh +++ b/.kokoro/test-samples.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh index f39236e94..d85b1f267 100755 --- a/.kokoro/trampoline.sh +++ b/.kokoro/trampoline.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Copyright 2017 Google Inc. +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
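Each `--hash=sha256:...` entry in the requirements diff above pins one distributable artifact; in pip's hash-checking mode the downloaded file's digest must match a pinned value before installation proceeds. A rough Python illustration of the check itself, with a hypothetical wheel filename standing in for the real download:

    import hashlib

    # Hypothetical stand-ins for a downloaded wheel and its pinned digest.
    wheel_path = "cryptography-41.0.2-example.whl"
    pinned = "01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711"

    with open(wheel_path, "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()

    if digest != pinned:
        raise RuntimeError("hash mismatch: refusing to install")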
diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh index 4af6cdc26..59a7cf3a9 100755 --- a/.kokoro/trampoline_v2.sh +++ b/.kokoro/trampoline_v2.sh @@ -1,5 +1,5 @@ #!/usr/bin/env bash -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5405cc8ff..9e3898fd1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,4 +1,4 @@ -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/.trampolinerc b/.trampolinerc index 0eee72ab6..a7dfeb42c 100644 --- a/.trampolinerc +++ b/.trampolinerc @@ -1,4 +1,4 @@ -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -12,8 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Template for .trampolinerc - # Add required env vars here. required_envvars+=( ) diff --git a/MANIFEST.in b/MANIFEST.in index e783f4c62..e0a667053 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/docs/conf.py b/docs/conf.py index 34f3a4d08..b5a870f58 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- -# Copyright 2021 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/noxfile.py b/noxfile.py index fe9f07b4f..837b60887 100644 --- a/noxfile.py +++ b/noxfile.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2018 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -440,6 +440,7 @@ def prerelease_deps(session): "python", "-c", "import google.protobuf; print(google.protobuf.__version__)" ) session.run("python", "-c", "import grpc; print(grpc.__version__)") + session.run("python", "-c", "import google.auth; print(google.auth.__version__)") session.run("py.test", "tests/unit") diff --git a/scripts/decrypt-secrets.sh b/scripts/decrypt-secrets.sh index 21f6d2a26..0018b421d 100755 --- a/scripts/decrypt-secrets.sh +++ b/scripts/decrypt-secrets.sh @@ -1,6 +1,6 @@ #!/bin/bash -# Copyright 2015 Google Inc. All rights reserved. +# Copyright 2023 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/scripts/readme-gen/readme_gen.py b/scripts/readme-gen/readme_gen.py index 91b59676b..1acc11983 100644 --- a/scripts/readme-gen/readme_gen.py +++ b/scripts/readme-gen/readme_gen.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -# Copyright 2016 Google Inc +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -33,17 +33,17 @@ autoescape=True, ) -README_TMPL = jinja_env.get_template('README.tmpl.rst') +README_TMPL = jinja_env.get_template("README.tmpl.rst") def get_help(file): - return subprocess.check_output(['python', file, '--help']).decode() + return subprocess.check_output(["python", file, "--help"]).decode() def main(): parser = argparse.ArgumentParser() - parser.add_argument('source') - parser.add_argument('--destination', default='README.rst') + parser.add_argument("source") + parser.add_argument("--destination", default="README.rst") args = parser.parse_args() @@ -51,9 +51,9 @@ def main(): root = os.path.dirname(source) destination = os.path.join(root, args.destination) - jinja_env.globals['get_help'] = get_help + jinja_env.globals["get_help"] = get_help - with io.open(source, 'r') as f: + with io.open(source, "r") as f: config = yaml.load(f) # This allows get_help to execute in the right directory. @@ -61,9 +61,9 @@ def main(): output = README_TMPL.render(config) - with io.open(destination, 'w') as f: + with io.open(destination, "w") as f: f.write(output) -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/setup.cfg b/setup.cfg index c3a2b39f6..052350089 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- # -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. From 8d80040e1f45f68f4baf5e20bb9cdce974609be1 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 19 Jul 2023 12:51:34 -0400 Subject: [PATCH 29/52] chore(main): release 2.20.0 (#827) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .release-please-manifest.json | 2 +- CHANGELOG.md | 18 ++++++++++++++++++ google/cloud/bigtable/gapic_version.py | 2 +- google/cloud/bigtable_admin/gapic_version.py | 2 +- .../cloud/bigtable_admin_v2/gapic_version.py | 2 +- google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 23 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index b7f666a68..ba3e06a78 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.19.0" + ".": "2.20.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index dc80386a4..2e86e4d43 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,24 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## [2.20.0](https://github.com/googleapis/python-bigtable/compare/v2.19.0...v2.20.0) (2023-07-17) + + +### Features + +* Add experimental reverse scan for public preview ([d5720f8](https://github.com/googleapis/python-bigtable/commit/d5720f8f5b5a81572f31d40051b3ec0f1d104304)) +* Increase the maximum retention period for a Cloud Bigtable backup from 30 days to 90 days ([d5720f8](https://github.com/googleapis/python-bigtable/commit/d5720f8f5b5a81572f31d40051b3ec0f1d104304)) + + +### Bug Fixes + +* Add async context manager return types ([#828](https://github.com/googleapis/python-bigtable/issues/828)) ([475a160](https://github.com/googleapis/python-bigtable/commit/475a16072f3ad41357bdb765fff608a39141ec00)) + + +### Documentation + +* Fix formatting for reversed order field example ([#831](https://github.com/googleapis/python-bigtable/issues/831)) 
([fddd0ba](https://github.com/googleapis/python-bigtable/commit/fddd0ba97155e112af92a98fd8f20e59b139d177)) + ## [2.19.0](https://github.com/googleapis/python-bigtable/compare/v2.18.1...v2.19.0) (2023-06-08) diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py index 0f1a446f3..551f0d2eb 100644 --- a/google/cloud/bigtable/gapic_version.py +++ b/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.19.0" # {x-release-please-version} +__version__ = "2.20.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py index 0f1a446f3..551f0d2eb 100644 --- a/google/cloud/bigtable_admin/gapic_version.py +++ b/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.19.0" # {x-release-please-version} +__version__ = "2.20.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py index 0f1a446f3..551f0d2eb 100644 --- a/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.19.0" # {x-release-please-version} +__version__ = "2.20.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py index 0f1a446f3..551f0d2eb 100644 --- a/google/cloud/bigtable_v2/gapic_version.py +++ b/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.19.0" # {x-release-please-version} +__version__ = "2.20.0" # {x-release-please-version} From 57c736d34f34f6cefb6ff8087537d0921f093ef9 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 21 Jul 2023 09:19:49 -0400 Subject: [PATCH 30/52] build(deps): [autoapprove] bump pygments from 2.13.0 to 2.15.0 (#841) Source-Link: https://github.com/googleapis/synthtool/commit/eaef28efd179e6eeb9f4e9bf697530d074a6f3b9 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:f8ca7655fa8a449cadcabcbce4054f593dcbae7aeeab34aa3fcc8b5cf7a93c9e Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .kokoro/requirements.txt | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index ae4a522b9..17c21d96d 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:39f0f3f2be02ef036e297e376fe3b6256775576da8a6ccb1d5eeb80f4c8bf8fb -# created: 2023-07-17T15:20:13.819193964Z + digest: sha256:f8ca7655fa8a449cadcabcbce4054f593dcbae7aeeab34aa3fcc8b5cf7a93c9e +# created: 2023-07-21T02:12:46.49799314Z diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 67d70a110..b563eb284 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -396,9 +396,9 @@ pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pygments==2.13.0 \ - --hash=sha256:56a8508ae95f98e2b9bdf93a6be5ae3f7d8af858b43e02c5a2ff083726be40c1 \ - --hash=sha256:f643f331ab57ba3c9d89212ee4a2dabc6e94f117cf4eefde99a0574720d14c42 +pygments==2.15.0 \ + --hash=sha256:77a3299119af881904cd5ecd1ac6a66214b6e9bed1f2db16993b54adede64094 \ + --hash=sha256:f7e36cffc4c517fbc252861b9a6e4644ca0e5abadf9a113c72d1358ad09b9500 # via # readme-renderer # rich From 14a673901f82fa247c8027730a0bba41e0ec4757 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 26 Jul 2023 15:02:13 -0400 Subject: [PATCH 31/52] feat: add last_scanned_row_responses to FeatureFlags (#845) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: add last_scanned_row_key feature PiperOrigin-RevId: 551191182 Source-Link: https://github.com/googleapis/googleapis/commit/51e04baa9eec3bee8b3e237bfd847eb06aa66d72 Source-Link: https://github.com/googleapis/googleapis-gen/commit/4b90e8ead4477eff96c31b9b0fdef36ed975b15f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiNGI5MGU4ZWFkNDQ3N2VmZjk2YzMxYjliMGZkZWYzNmVkOTc1YjE1ZiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/cloud/bigtable_v2/types/feature_flags.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py index d1fd03ff6..c7a64db5e 100644 --- a/google/cloud/bigtable_v2/types/feature_flags.py +++ b/google/cloud/bigtable_v2/types/feature_flags.py @@ -48,6 +48,10 @@ class FeatureFlags(proto.Message): Notify the server that the client enables batch write flow control by requesting RateLimitInfo from MutateRowsResponse. + last_scanned_row_responses (bool): + Notify the server that the client supports the + last_scanned_row field in ReadRowsResponse for long-running + sparse scans. 
""" reverse_scans: bool = proto.Field( @@ -58,6 +62,10 @@ class FeatureFlags(proto.Message): proto.BOOL, number=3, ) + last_scanned_row_responses: bool = proto.Field( + proto.BOOL, + number=4, + ) __all__ = tuple(sorted(__protobuf__.manifest)) From f4f696f1a16f571921d63e594905c27d40565f47 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 27 Jul 2023 05:54:30 -0400 Subject: [PATCH 32/52] build(deps): [autoapprove] bump certifi from 2022.12.7 to 2023.7.22 (#844) Source-Link: https://github.com/googleapis/synthtool/commit/395d53adeeacfca00b73abf197f65f3c17c8f1e9 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:6c1cbc75c74b8bdd71dada2fa1677e9d6d78a889e9a70ee75b93d1d0543f96e1 Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .kokoro/requirements.txt | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 17c21d96d..0ddd0e4d1 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:f8ca7655fa8a449cadcabcbce4054f593dcbae7aeeab34aa3fcc8b5cf7a93c9e -# created: 2023-07-21T02:12:46.49799314Z + digest: sha256:6c1cbc75c74b8bdd71dada2fa1677e9d6d78a889e9a70ee75b93d1d0543f96e1 +# created: 2023-07-25T21:01:10.396410762Z diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index b563eb284..76d9bba0f 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -20,9 +20,9 @@ cachetools==5.2.0 \ --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db # via google-auth -certifi==2022.12.7 \ - --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ - --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 +certifi==2023.7.22 \ + --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ + --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 # via requests cffi==1.15.1 \ --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ From 7d1593d79c2e8f090b2ff295048f48fe2eb5ab59 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 1 Aug 2023 16:18:12 -0400 Subject: [PATCH 33/52] chore: [autoapprove] Pin flake8 version (#847) Source-Link: https://github.com/googleapis/synthtool/commit/0ddbff8012e47cde4462fe3f9feab01fbc4cdfd6 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:bced5ca77c4dda0fd2f5d845d4035fc3c5d3d6b81f245246a36aee114970082b Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .pre-commit-config.yaml | 2 +- noxfile.py | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 0ddd0e4d1..d71329cc8 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:6c1cbc75c74b8bdd71dada2fa1677e9d6d78a889e9a70ee75b93d1d0543f96e1 -# created: 2023-07-25T21:01:10.396410762Z + digest: sha256:bced5ca77c4dda0fd2f5d845d4035fc3c5d3d6b81f245246a36aee114970082b +# created: 2023-08-01T17:41:45.434027321Z diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9e3898fd1..19409cbd3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,6 +26,6 @@ repos: hooks: - id: black - repo: https://github.com/pycqa/flake8 - rev: 3.9.2 + rev: 6.1.0 hooks: - id: flake8 diff --git a/noxfile.py b/noxfile.py index 837b60887..b2820b309 100644 --- a/noxfile.py +++ b/noxfile.py @@ -25,6 +25,7 @@ import nox +FLAKE8_VERSION = "flake8==6.1.0" BLACK_VERSION = "black==22.3.0" ISORT_VERSION = "isort==5.10.1" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] @@ -83,7 +84,7 @@ def lint(session): Returns a failure if the linters find linting errors or sufficiently serious code quality issues. """ - session.install("flake8", BLACK_VERSION) + session.install(FLAKE8_VERSION, BLACK_VERSION) session.run( "black", "--check", From 5ebe2312dab70210811fca68c6625d2546442afd Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 2 Aug 2023 11:22:12 -0400 Subject: [PATCH 34/52] docs: Minor formatting (#851) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: Minor formatting PiperOrigin-RevId: 553099804 Source-Link: https://github.com/googleapis/googleapis/commit/f48d1a329db8655ccf843c814026060436111161 Source-Link: https://github.com/googleapis/googleapis-gen/commit/9607990f4c3217bac6edd8131614cfcc71744a6f Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTYwNzk5MGY0YzMyMTdiYWM2ZWRkODEzMTYxNGNmY2M3MTc0NGE2ZiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../bigtable_instance_admin/async_client.py | 70 +------ .../bigtable_instance_admin/client.py | 70 +------ .../transports/rest.py | 172 +++++++++--------- .../bigtable_table_admin/async_client.py | 70 +------ .../services/bigtable_table_admin/client.py | 70 +------ .../bigtable_table_admin/transports/rest.py | 172 +++++++++--------- 6 files changed, 188 insertions(+), 436 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 4b45774f0..31e68fe95 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1919,42 +1919,11 @@ async def get_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ 
"user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM @@ -2068,42 +2037,11 @@ async def set_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after 
Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index fb993b651..d38105a2a 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -2143,42 +2143,11 @@ def get_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM @@ -2279,42 +2248,11 @@ def set_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", 
"serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 228f5c02c..25bb0a0b9 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -1612,54 +1612,54 @@ def __call__( :: - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } **YAML example:** :: - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 + bindings: + - members: + - 
user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, see the `IAM documentation `__. @@ -2439,54 +2439,54 @@ def __call__( :: - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } **YAML example:** :: - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, see the `IAM documentation `__. 
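The reflowed JSON/YAML above describes the Policy shape these IAM methods return and accept. A short sketch against the instance-admin client, assuming hypothetical resource names and the flattened `resource` parameter shown in the docstrings:

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()
    resource = "projects/my-project/instances/my-instance"  # hypothetical

    policy = client.get_iam_policy(resource=resource)
    for binding in policy.bindings:
        # Each binding mirrors one entry of the JSON "bindings" list above.
        print(binding.role, list(binding.members))

    # Round-trip the (possibly edited) policy; the etag field guards
    # against concurrent modification.
    client.set_iam_policy(request={"resource": resource, "policy": policy})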
diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index a7eed5d37..31ddfac72 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2337,42 +2337,11 @@ async def get_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM @@ -2486,42 +2455,11 @@ async def set_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ 
"user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 037464954..b7127c032 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2592,42 +2592,11 @@ def get_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - 
user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM @@ -2728,42 +2697,11 @@ def set_iam_policy( **JSON example:** - { - "bindings": [ - { - "role": - "roles/resourcemanager.organizationAdmin", - "members": [ "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - - }, { "role": - "roles/resourcemanager.organizationViewer", - "members": [ "user:eve@example.com" ], - "condition": { "title": "expirable access", - "description": "Does not grant access after - Sep 2020", "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", } } - - ], "etag": "BwWWja0YfJA=", "version": 3 - - } + :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` **YAML example:** - bindings: - members: - user:\ mike@example.com - - group:\ admins@example.com - domain:google.com - - serviceAccount:\ my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - user:\ eve@example.com role: - roles/resourcemanager.organizationViewer - condition: title: expirable access description: - Does not grant access after Sep 2020 expression: - request.time < - timestamp('2020-10-01T00:00:00.000Z') etag: - BwWWja0YfJA= version: 3 + :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` For a description of IAM and its features, see the [IAM diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 817916977..37518c8da 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -1881,54 +1881,54 @@ def __call__( :: - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } + { + "bindings": [ 
+ { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } **YAML example:** :: - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, see the `IAM documentation `__. @@ -2733,54 +2733,54 @@ def __call__( :: - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } + { + "bindings": [ + { + "role": "roles/resourcemanager.organizationAdmin", + "members": [ + "user:mike@example.com", + "group:admins@example.com", + "domain:google.com", + "serviceAccount:my-project-id@appspot.gserviceaccount.com" + ] + }, + { + "role": "roles/resourcemanager.organizationViewer", + "members": [ + "user:eve@example.com" + ], + "condition": { + "title": "expirable access", + "description": "Does not grant access after Sep 2020", + "expression": "request.time < + timestamp('2020-10-01T00:00:00.000Z')", + } + } + ], + "etag": "BwWWja0YfJA=", + "version": 3 + } **YAML example:** :: - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 + bindings: + - members: + - user:mike@example.com + - group:admins@example.com + - domain:google.com + - serviceAccount:my-project-id@appspot.gserviceaccount.com + role: 
roles/resourcemanager.organizationAdmin + - members: + - user:eve@example.com + role: roles/resourcemanager.organizationViewer + condition: + title: expirable access + description: Does not grant access after Sep 2020 + expression: request.time < timestamp('2020-10-01T00:00:00.000Z') + etag: BwWWja0YfJA= + version: 3 For a description of IAM and its features, see the `IAM documentation `__. From c223be457d67db0c065857439265171078c095cf Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 2 Aug 2023 11:22:39 -0400 Subject: [PATCH 35/52] build: [autoapprove] bump cryptography from 41.0.2 to 41.0.3 (#850) Source-Link: https://github.com/googleapis/synthtool/commit/352b9d4c068ce7c05908172af128b294073bf53c Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7 Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .kokoro/requirements.txt | 48 +++++++++++++++++++-------------------- 2 files changed, 26 insertions(+), 26 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index d71329cc8..a3da1b0d4 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:bced5ca77c4dda0fd2f5d845d4035fc3c5d3d6b81f245246a36aee114970082b -# created: 2023-08-01T17:41:45.434027321Z + digest: sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7 +# created: 2023-08-02T10:53:29.114535628Z diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 76d9bba0f..029bd342d 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -113,30 +113,30 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==41.0.2 \ - --hash=sha256:01f1d9e537f9a15b037d5d9ee442b8c22e3ae11ce65ea1f3316a41c78756b711 \ - --hash=sha256:079347de771f9282fbfe0e0236c716686950c19dee1b76240ab09ce1624d76d7 \ - --hash=sha256:182be4171f9332b6741ee818ec27daff9fb00349f706629f5cbf417bd50e66fd \ - --hash=sha256:192255f539d7a89f2102d07d7375b1e0a81f7478925b3bc2e0549ebf739dae0e \ - --hash=sha256:2a034bf7d9ca894720f2ec1d8b7b5832d7e363571828037f9e0c4f18c1b58a58 \ - --hash=sha256:342f3767e25876751e14f8459ad85e77e660537ca0a066e10e75df9c9e9099f0 \ - --hash=sha256:439c3cc4c0d42fa999b83ded80a9a1fb54d53c58d6e59234cfe97f241e6c781d \ - --hash=sha256:49c3222bb8f8e800aead2e376cbef687bc9e3cb9b58b29a261210456a7783d83 \ - --hash=sha256:674b669d5daa64206c38e507808aae49904c988fa0a71c935e7006a3e1e83831 \ - --hash=sha256:7a9a3bced53b7f09da251685224d6a260c3cb291768f54954e28f03ef14e3766 \ - --hash=sha256:7af244b012711a26196450d34f483357e42aeddb04128885d95a69bd8b14b69b \ - --hash=sha256:7d230bf856164de164ecb615ccc14c7fc6de6906ddd5b491f3af90d3514c925c \ - --hash=sha256:84609ade00a6ec59a89729e87a503c6e36af98ddcd566d5f3be52e29ba993182 \ - --hash=sha256:9a6673c1828db6270b76b22cc696f40cde9043eb90373da5c2f8f2158957f42f \ - --hash=sha256:9b6d717393dbae53d4e52684ef4f022444fc1cce3c48c38cb74fca29e1f08eaa \ - --hash=sha256:9c3fe6534d59d071ee82081ca3d71eed3210f76ebd0361798c74abc2bcf347d4 \ - --hash=sha256:a719399b99377b218dac6cf547b6ec54e6ef20207b6165126a280b0ce97e0d2a \ - --hash=sha256:b332cba64d99a70c1e0836902720887fb4529ea49ea7f5462cf6640e095e11d2 \ - 
--hash=sha256:d124682c7a23c9764e54ca9ab5b308b14b18eba02722b8659fb238546de83a76 \ - --hash=sha256:d73f419a56d74fef257955f51b18d046f3506270a5fd2ac5febbfa259d6c0fa5 \ - --hash=sha256:f0dc40e6f7aa37af01aba07277d3d64d5a03dc66d682097541ec4da03cc140ee \ - --hash=sha256:f14ad275364c8b4e525d018f6716537ae7b6d369c094805cae45300847e0894f \ - --hash=sha256:f772610fe364372de33d76edcd313636a25684edb94cee53fd790195f5989d14 +cryptography==41.0.3 \ + --hash=sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306 \ + --hash=sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84 \ + --hash=sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47 \ + --hash=sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d \ + --hash=sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116 \ + --hash=sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207 \ + --hash=sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81 \ + --hash=sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087 \ + --hash=sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd \ + --hash=sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507 \ + --hash=sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858 \ + --hash=sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae \ + --hash=sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34 \ + --hash=sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906 \ + --hash=sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd \ + --hash=sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922 \ + --hash=sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7 \ + --hash=sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4 \ + --hash=sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574 \ + --hash=sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1 \ + --hash=sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c \ + --hash=sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e \ + --hash=sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de # via # gcp-releasetool # secretstorage From 9b1f9c225ab3c2876171ba5a2982d5af5d14dcb0 Mon Sep 17 00:00:00 2001 From: "release-please[bot]" <55107282+release-please[bot]@users.noreply.github.com> Date: Wed, 9 Aug 2023 09:35:39 -0400 Subject: [PATCH 36/52] chore(main): release 2.21.0 (#846) Co-authored-by: release-please[bot] <55107282+release-please[bot]@users.noreply.github.com> --- .release-please-manifest.json | 2 +- CHANGELOG.md | 12 ++++++++++++ google/cloud/bigtable/gapic_version.py | 2 +- google/cloud/bigtable_admin/gapic_version.py | 2 +- google/cloud/bigtable_admin_v2/gapic_version.py | 2 +- google/cloud/bigtable_v2/gapic_version.py | 2 +- 6 files changed, 17 insertions(+), 5 deletions(-) diff --git a/.release-please-manifest.json b/.release-please-manifest.json index ba3e06a78..5be20145a 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "2.20.0" + ".": "2.21.0" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 2e86e4d43..1a2a6ad3a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,18 @@ [1]: https://pypi.org/project/google-cloud-bigtable/#history +## 
[2.21.0](https://github.com/googleapis/python-bigtable/compare/v2.20.0...v2.21.0) (2023-08-02) + + +### Features + +* Add last_scanned_row_responses to FeatureFlags ([#845](https://github.com/googleapis/python-bigtable/issues/845)) ([14a6739](https://github.com/googleapis/python-bigtable/commit/14a673901f82fa247c8027730a0bba41e0ec4757)) + + +### Documentation + +* Minor formatting ([#851](https://github.com/googleapis/python-bigtable/issues/851)) ([5ebe231](https://github.com/googleapis/python-bigtable/commit/5ebe2312dab70210811fca68c6625d2546442afd)) + ## [2.20.0](https://github.com/googleapis/python-bigtable/compare/v2.19.0...v2.20.0) (2023-07-17) diff --git a/google/cloud/bigtable/gapic_version.py b/google/cloud/bigtable/gapic_version.py index 551f0d2eb..e546bae05 100644 --- a/google/cloud/bigtable/gapic_version.py +++ b/google/cloud/bigtable/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.20.0" # {x-release-please-version} +__version__ = "2.21.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin/gapic_version.py b/google/cloud/bigtable_admin/gapic_version.py index 551f0d2eb..e546bae05 100644 --- a/google/cloud/bigtable_admin/gapic_version.py +++ b/google/cloud/bigtable_admin/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.20.0" # {x-release-please-version} +__version__ = "2.21.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_admin_v2/gapic_version.py b/google/cloud/bigtable_admin_v2/gapic_version.py index 551f0d2eb..e546bae05 100644 --- a/google/cloud/bigtable_admin_v2/gapic_version.py +++ b/google/cloud/bigtable_admin_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "2.20.0" # {x-release-please-version} +__version__ = "2.21.0" # {x-release-please-version} diff --git a/google/cloud/bigtable_v2/gapic_version.py b/google/cloud/bigtable_v2/gapic_version.py index 551f0d2eb..e546bae05 100644 --- a/google/cloud/bigtable_v2/gapic_version.py +++ b/google/cloud/bigtable_v2/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "2.20.0" # {x-release-please-version} +__version__ = "2.21.0" # {x-release-please-version} From 4105df762f1318c49bba030063897f0c50e4daee Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 23 Aug 2023 15:53:15 -0700 Subject: [PATCH 37/52] feat: publish CopyBackup protos to external customers (#855) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: publish CopyBackup protos to external customers PiperOrigin-RevId: 557192020 Source-Link: https://github.com/googleapis/googleapis/commit/b4c238feaa1097c53798ed77035bbfeb7fc72e96 Source-Link: https://github.com/googleapis/googleapis-gen/commit/feccb30e3177da8b7b7e68149ca4bb914f8faf2a Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZmVjY2IzMGUzMTc3ZGE4YjdiN2U2ODE0OWNhNGJiOTE0ZjhmYWYyYSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/cloud/bigtable_admin/__init__.py | 4 + google/cloud/bigtable_admin_v2/__init__.py | 4 + .../bigtable_admin_v2/gapic_metadata.json | 15 + .../bigtable_table_admin/async_client.py | 142 ++++- .../services/bigtable_table_admin/client.py | 142 ++++- .../bigtable_table_admin/transports/base.py | 14 + .../bigtable_table_admin/transports/grpc.py | 33 +- .../transports/grpc_asyncio.py | 35 +- .../bigtable_table_admin/transports/rest.py | 136 +++++ .../cloud/bigtable_admin_v2/types/__init__.py | 4 + .../types/bigtable_table_admin.py | 104 +++- google/cloud/bigtable_admin_v2/types/table.py | 56 +- scripts/fixup_bigtable_admin_v2_keywords.py | 1 + .../test_bigtable_table_admin.py | 568 ++++++++++++++++++ 14 files changed, 1221 insertions(+), 37 deletions(-) diff --git a/google/cloud/bigtable_admin/__init__.py b/google/cloud/bigtable_admin/__init__.py index 43535ae20..d26d79b3c 100644 --- a/google/cloud/bigtable_admin/__init__.py +++ b/google/cloud/bigtable_admin/__init__.py @@ -115,6 +115,8 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CheckConsistencyResponse, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CreateBackupMetadata, ) @@ -242,6 +244,8 @@ "UpdateInstanceMetadata", "CheckConsistencyRequest", "CheckConsistencyResponse", + "CopyBackupMetadata", + "CopyBackupRequest", "CreateBackupMetadata", "CreateBackupRequest", "CreateTableFromSnapshotMetadata", diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index 8033a0af7..811b956e0 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -51,6 +51,8 @@ from .types.bigtable_instance_admin import UpdateInstanceMetadata from .types.bigtable_table_admin import CheckConsistencyRequest from .types.bigtable_table_admin import CheckConsistencyResponse +from .types.bigtable_table_admin import CopyBackupMetadata +from .types.bigtable_table_admin import CopyBackupRequest from .types.bigtable_table_admin import CreateBackupMetadata from .types.bigtable_table_admin import CreateBackupRequest from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata @@ -116,6 +118,8 @@ "CheckConsistencyResponse", "Cluster", "ColumnFamily", + "CopyBackupMetadata", + "CopyBackupRequest", 
"CreateAppProfileRequest", "CreateBackupMetadata", "CreateBackupRequest", diff --git a/google/cloud/bigtable_admin_v2/gapic_metadata.json b/google/cloud/bigtable_admin_v2/gapic_metadata.json index d797338cc..9b3426470 100644 --- a/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -349,6 +349,11 @@ "check_consistency" ] }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, "CreateBackup": { "methods": [ "create_backup" @@ -474,6 +479,11 @@ "check_consistency" ] }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, "CreateBackup": { "methods": [ "create_backup" @@ -599,6 +609,11 @@ "check_consistency" ] }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, "CreateBackup": { "methods": [ "create_backup" diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 31ddfac72..d1c1811a6 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2143,7 +2143,7 @@ async def list_backups( Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. Iterating over this object will yield results and resolve additional pages automatically. @@ -2218,9 +2218,8 @@ async def restore_table( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + r"""Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -2283,6 +2282,141 @@ async def restore_table( # Done; return the response. return response + async def copy_backup( + self, + request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, + *, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + source_backup: Optional[str] = None, + expire_time: Optional[timestamp_pb2.Timestamp] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]): + The request object. The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + parent (:class:`str`): + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already + exists. Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (:class:`str`): + Required. The id of the new backup. 
The ``backup_id`` + along with ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup + name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_backup (:class:`str`): + Required. The source backup to be copied from. The + source backup needs to be in READY state for it to be + copied. Copying a copied backup is not allowed. Once + CopyBackup is in progress, the source backup cannot be + deleted or cleaned up on expiration until CopyBackup is + finished. Values are of the form: + ``projects//instances//clusters//backups/``. + + This corresponds to the ``source_backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): + Required. Required. The expiration time of the copied + backup with microsecond granularity that must be at + least 6 hours and at most 30 days from the time the + request is received. Once the ``expire_time`` has + passed, Cloud Bigtable will delete the backup and free + the resources used by the backup. + + This corresponds to the ``expire_time`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CopyBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if source_backup is not None: + request.source_backup = source_backup + if expire_time is not None: + request.expire_time = expire_time + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.copy_backup, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
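+        # from_gapic binds the raw operations_pb2.Operation to the transport's
+        # operations client, so awaiting the returned AsyncOperation yields a
+        # table.Backup and exposes CopyBackupMetadata while the copy runs.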
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CopyBackupMetadata, + ) + + # Done; return the response. + return response + async def get_iam_policy( self, request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index b7127c032..80231fce9 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2407,7 +2407,7 @@ def list_backups( Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. Iterating over this object will yield results and resolve additional pages automatically. @@ -2472,9 +2472,8 @@ def restore_table( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: - r"""Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + r"""Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -2538,6 +2537,141 @@ def restore_table( # Done; return the response. return response + def copy_backup( + self, + request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, + *, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + source_backup: Optional[str] = None, + expire_time: Optional[timestamp_pb2.Timestamp] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]): + The request object. The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + parent (str): + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already + exists. Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (str): + Required. The id of the new backup. The ``backup_id`` + along with ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup + name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_backup (str): + Required. The source backup to be copied from. 
The + source backup needs to be in READY state for it to be + copied. Copying a copied backup is not allowed. Once + CopyBackup is in progress, the source backup cannot be + deleted or cleaned up on expiration until CopyBackup is + finished. Values are of the form: + ``projects//instances//clusters//backups/``. + + This corresponds to the ``source_backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Required. Required. The expiration time of the copied + backup with microsecond granularity that must be at + least 6 hours and at most 30 days from the time the + request is received. Once the ``expire_time`` has + passed, Cloud Bigtable will delete the backup and free + the resources used by the backup. + + This corresponds to the ``expire_time`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CopyBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CopyBackupRequest): + request = bigtable_table_admin.CopyBackupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if source_backup is not None: + request.source_backup = source_backup + if expire_time is not None: + request.expire_time = expire_time + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.copy_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CopyBackupMetadata, + ) + + # Done; return the response. 
+ return response + def get_iam_policy( self, request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 591c6bcfe..c3cf01a96 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -322,6 +322,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.copy_backup: gapic_v1.method.wrap_method( + self.copy_backup, + default_timeout=None, + client_info=client_info, + ), self.get_iam_policy: gapic_v1.method.wrap_method( self.get_iam_policy, default_retry=retries.Retry( @@ -577,6 +582,15 @@ def restore_table( ]: raise NotImplementedError() + @property + def copy_backup( + self, + ) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def get_iam_policy( self, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index e18be126c..34a1596fc 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -866,9 +866,8 @@ def restore_table( ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations_pb2.Operation]: r"""Return a callable for the restore table method over gRPC. - Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -894,6 +893,34 @@ def restore_table( ) return self._stubs["restore_table"] + @property + def copy_backup( + self, + ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]: + r"""Return a callable for the copy backup method over gRPC. + + Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Returns: + Callable[[~.CopyBackupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
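+        # The stub is created lazily on first access and cached in
+        # self._stubs, so later uses of this property reuse the same
+        # channel method.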
+ if "copy_backup" not in self._stubs: + self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", + request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["copy_backup"] + @property def get_iam_policy( self, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 8f72e3fe8..b19d4e7be 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -890,9 +890,8 @@ def restore_table( ]: r"""Return a callable for the restore table method over gRPC. - Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -918,6 +917,36 @@ def restore_table( ) return self._stubs["restore_table"] + @property + def copy_backup( + self, + ) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the copy backup method over gRPC. + + Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Returns: + Callable[[~.CopyBackupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "copy_backup" not in self._stubs: + self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", + request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["copy_backup"] + @property def get_iam_policy( self, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 37518c8da..4b3a846dc 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -84,6 +84,14 @@ def post_check_consistency(self, response): logging.log(f"Received response: {response}") return response + def pre_copy_backup(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_copy_backup(self, response): + logging.log(f"Received response: {response}") + return response + def pre_create_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -281,6 +289,29 @@ def post_check_consistency( """ return response + def pre_copy_backup( + self, + request: bigtable_table_admin.CopyBackupRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for copy_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_copy_backup( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for copy_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_create_backup( self, request: bigtable_table_admin.CreateBackupRequest, @@ -1010,6 +1041,103 @@ def __call__( resp = self._interceptor.post_check_consistency(resp) return resp + class _CopyBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CopyBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.CopyBackupRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the copy backup method over HTTP. + + Args: + request (~.bigtable_table_admin.CopyBackupRequest): + The request object. The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_copy_backup(request, metadata) + pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_copy_backup(resp) + return resp + class _CreateBackup(BigtableTableAdminRestStub): def __hash__(self): return hash("CreateBackup") @@ -3360,6 +3488,14 @@ def check_consistency( # In C++ this would require a dynamic_cast return self._CheckConsistency(self._session, self._host, self._interceptor) # type: ignore + @property + def copy_backup( + self, + ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore + @property def create_backup( self, diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py index c69e3129b..a2fefffc8 100644 --- a/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/google/cloud/bigtable_admin_v2/types/__init__.py @@ -44,6 +44,8 @@ from .bigtable_table_admin import ( CheckConsistencyRequest, CheckConsistencyResponse, + CopyBackupMetadata, + CopyBackupRequest, CreateBackupMetadata, CreateBackupRequest, CreateTableFromSnapshotMetadata, @@ -130,6 +132,8 @@ "UpdateInstanceMetadata", "CheckConsistencyRequest", "CheckConsistencyResponse", + "CopyBackupMetadata", + "CopyBackupRequest", "CreateBackupMetadata", "CreateBackupRequest", "CreateTableFromSnapshotMetadata", diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index dfa815dc2..2b108351a 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -62,6 +62,8 @@ "DeleteBackupRequest", "ListBackupsRequest", "ListBackupsResponse", + "CopyBackupRequest", + "CopyBackupMetadata", }, ) @@ -76,8 +78,7 @@ class RestoreTableRequest(proto.Message): Attributes: parent (str): Required. The name of the instance in which to create the - restored table. This instance must be in the same project as - the source backup. Values are of the form + restored table. Values are of the form ``projects//instances/``. table_id (str): Required. The id of the table to create and restore to. This @@ -359,7 +360,7 @@ class ListTablesRequest(proto.Message): should be listed. Values are of the form ``projects/{project}/instances/{instance}``. view (google.cloud.bigtable_admin_v2.types.Table.View): - The view to be applied to the returned tables' fields. Only + The view to be applied to the returned tables' fields. NAME_ONLY view (default) and REPLICATION_VIEW are supported. page_size (int): Maximum number of results per page. @@ -1192,8 +1193,15 @@ class ListBackupsRequest(proto.Message): fields in [Backup][google.bigtable.admin.v2.Backup]. The full syntax is described at https://aip.dev/132#ordering. - Fields supported are: \* name \* source_table \* expire_time - \* start_time \* end_time \* size_bytes \* state + Fields supported are: + + - name + - source_table + - expire_time + - start_time + - end_time + - size_bytes + - state For example, "start_time". The default sorting order is ascending. To specify descending order for the field, a @@ -1266,4 +1274,90 @@ def raw_page(self): ) +class CopyBackupRequest(proto.Message): + r"""The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + + Attributes: + parent (str): + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already exists. + Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + backup_id (str): + Required. The id of the new backup. The ``backup_id`` along + with ``parent`` are combined as {parent}/backups/{backup_id} + to create the full backup name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in length + and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + source_backup (str): + Required. 
The source backup to be copied from. The source
+            backup needs to be in READY state for it to be copied.
+            Copying a copied backup is not allowed. Once CopyBackup is
+            in progress, the source backup cannot be deleted or cleaned
+            up on expiration until CopyBackup is finished. Values are of
+            the form:
+            ``projects//instances//clusters//backups/``.
+        expire_time (google.protobuf.timestamp_pb2.Timestamp):
+            Required. The expiration time of the copied backup
+            with microsecond granularity that must be at least 6 hours
+            and at most 30 days from the time the request is received.
+            Once the ``expire_time`` has passed, Cloud Bigtable will
+            delete the backup and free the resources used by the backup.
+    """
+
+    parent: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    backup_id: str = proto.Field(
+        proto.STRING,
+        number=2,
+    )
+    source_backup: str = proto.Field(
+        proto.STRING,
+        number=3,
+    )
+    expire_time: timestamp_pb2.Timestamp = proto.Field(
+        proto.MESSAGE,
+        number=4,
+        message=timestamp_pb2.Timestamp,
+    )
+
+
+class CopyBackupMetadata(proto.Message):
+    r"""Metadata type for the google.longrunning.Operation returned by
+    [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup].
+
+    Attributes:
+        name (str):
+            The name of the backup being created through the copy
+            operation. Values are of the form
+            ``projects//instances//clusters//backups/``.
+        source_backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo):
+            Information about the source backup that is
+            being copied from.
+        progress (google.cloud.bigtable_admin_v2.types.OperationProgress):
+            The progress of the
+            [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]
+            operation.
+    """
+
+    name: str = proto.Field(
+        proto.STRING,
+        number=1,
+    )
+    source_backup_info: gba_table.BackupInfo = proto.Field(
+        proto.MESSAGE,
+        number=2,
+        message=gba_table.BackupInfo,
+    )
+    progress: common.OperationProgress = proto.Field(
+        proto.MESSAGE,
+        number=3,
+        message=common.OperationProgress,
+    )
+
+
 __all__ = tuple(sorted(__protobuf__.manifest))
diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py
index e75ac00bb..6b885203d 100644
--- a/google/cloud/bigtable_admin_v2/types/table.py
+++ b/google/cloud/bigtable_admin_v2/types/table.py
@@ -124,7 +124,8 @@ class Table(proto.Message):
         ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL``
     column_families (MutableMapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]):
         The column families configured for this table, mapped by
-        column family ID. Views: ``SCHEMA_VIEW``, ``FULL``
+        column family ID. Views: ``SCHEMA_VIEW``, ``STATS_VIEW``,
+        ``FULL``
     granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity):
         Immutable. The granularity (i.e. ``MILLIS``) at which
         timestamps are stored in this table. Timestamps not matching
@@ -141,15 +142,16 @@ class Table(proto.Message):
         this table. Otherwise, the change stream is disabled and the
         change stream is not retained.
     deletion_protection (bool):
-        Set to true to make the table protected
-        against data loss. i.e. deleting the following
-        resources through Admin APIs are prohibited:
-
-        - The table.
-        - The column families in the table.
-        - The instance containing the table.
-        Note one can still delete the data stored in the
-        table through Data APIs.
+        Set to true to make the table protected against data loss,
+        i.e. deleting the following resources through Admin APIs is
+        prohibited:
+
+        - The table.
+        - The column families in the table.
+ - The instance containing the table. + + Note one can still delete the data stored in the table + through Data APIs. """ class TimestampGranularity(proto.Enum): @@ -487,8 +489,7 @@ class Snapshot(proto.Message): Attributes: name (str): - Output only. The unique name of the snapshot. Values are of - the form + The unique name of the snapshot. Values are of the form ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. source_table (google.cloud.bigtable_admin_v2.types.Table): Output only. The source table at the time the @@ -503,16 +504,15 @@ class Snapshot(proto.Message): Output only. The time when the snapshot is created. delete_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when the snapshot will - be deleted. The maximum amount of time a - snapshot can stay active is 365 days. If 'ttl' - is not specified, the default maximum of 365 - days will be used. + The time when the snapshot will be deleted. + The maximum amount of time a snapshot can stay + active is 365 days. If 'ttl' is not specified, + the default maximum of 365 days will be used. state (google.cloud.bigtable_admin_v2.types.Snapshot.State): Output only. The current state of the snapshot. description (str): - Output only. Description of the snapshot. + Description of the snapshot. """ class State(proto.Enum): @@ -588,6 +588,12 @@ class Backup(proto.Message): backup was created. This needs to be in the same instance as the backup. Values are of the form ``projects/{project}/instances/{instance}/tables/{source_table}``. + source_backup (str): + Output only. Name of the backup from which + this backup was copied. If a backup is not + created by copying a backup, this field will be + empty. Values are of the form: + projects//instances//backups/. expire_time (google.protobuf.timestamp_pb2.Timestamp): Required. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and @@ -637,6 +643,10 @@ class State(proto.Enum): proto.STRING, number=2, ) + source_backup: str = proto.Field( + proto.STRING, + number=10, + ) expire_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, @@ -685,6 +695,12 @@ class BackupInfo(proto.Message): source_table (str): Output only. Name of the table the backup was created from. + source_backup (str): + Output only. Name of the backup from which + this backup was copied. If a backup is not + created by copying a backup, this field will be + empty. Values are of the form: + projects//instances//backups/. 
""" backup: str = proto.Field( @@ -705,6 +721,10 @@ class BackupInfo(proto.Message): proto.STRING, number=4, ) + source_backup: str = proto.Field( + proto.STRING, + number=10, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/scripts/fixup_bigtable_admin_v2_keywords.py b/scripts/fixup_bigtable_admin_v2_keywords.py index 58eab8bcf..6882feaf6 100644 --- a/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/scripts/fixup_bigtable_admin_v2_keywords.py @@ -40,6 +40,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'check_consistency': ('name', 'consistency_token', ), + 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ), 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), 'create_backup': ('parent', 'backup_id', 'backup', ), 'create_cluster': ('parent', 'cluster_id', 'cluster', ), diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 0a0b3b671..a537cb9f0 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -4960,6 +4960,7 @@ def test_get_backup(request_type, transport: str = "grpc"): call.return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -4974,6 +4975,7 @@ def test_get_backup(request_type, transport: str = "grpc"): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -5014,6 +5016,7 @@ async def test_get_backup_async( table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -5029,6 +5032,7 @@ async def test_get_backup_async( assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -5200,6 +5204,7 @@ def test_update_backup(request_type, transport: str = "grpc"): call.return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -5214,6 +5219,7 @@ def test_update_backup(request_type, transport: str = "grpc"): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -5255,6 +5261,7 @@ async def test_update_backup_async( table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -5270,6 +5277,7 @@ async def test_update_backup_async( assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" 
assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -6217,6 +6225,262 @@ async def test_restore_table_field_headers_async(): ) in kw["metadata"] +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_copy_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + client.copy_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + +@pytest.mark.asyncio +async def test_copy_backup_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_copy_backup_async_from_dict(): + await test_copy_backup_async(request_type=dict) + + +def test_copy_backup_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
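+    # Patching the transport-level callable captures the outgoing request
+    # (and its routing metadata) without opening a real gRPC channel.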
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_copy_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_copy_backup_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.copy_backup( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].source_backup + mock_val = "source_backup_value" + assert arg == mock_val + assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( + seconds=751 + ) + + +def test_copy_backup_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +@pytest.mark.asyncio +async def test_copy_backup_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.copy_backup( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].source_backup + mock_val = "source_backup_value" + assert arg == mock_val + assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( + seconds=751 + ) + + +@pytest.mark.asyncio +async def test_copy_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + @pytest.mark.parametrize( "request_type", [ @@ -8206,6 +8470,7 @@ def test_update_table_rest(request_type): "start_time": {"seconds": 751, "nanos": 543}, "end_time": {}, "source_table": "source_table_value", + "source_backup": "source_backup_value", }, }, "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, @@ -8404,6 +8669,7 @@ def test_update_table_rest_bad_request( "start_time": {"seconds": 751, "nanos": 543}, "end_time": {}, "source_table": "source_table_value", + "source_backup": "source_backup_value", }, }, "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, @@ -11246,6 +11512,7 @@ def test_create_backup_rest(request_type): request_init["backup"] = { "name": "name_value", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -11467,6 +11734,7 @@ def test_create_backup_rest_bad_request( request_init["backup"] = { "name": "name_value", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -11593,6 +11861,7 @@ def test_get_backup_rest(request_type): return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -11611,6 +11880,7 @@ def test_get_backup_rest(request_type): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -11868,6 +12138,7 @@ def test_update_backup_rest(request_type): request_init["backup"] = { "name": 
"projects/sample1/instances/sample2/clusters/sample3/backups/sample4", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -11896,6 +12167,7 @@ def test_update_backup_rest(request_type): return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -11914,6 +12186,7 @@ def test_update_backup_rest(request_type): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -12082,6 +12355,7 @@ def test_update_backup_rest_bad_request( request_init["backup"] = { "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -13012,6 +13286,296 @@ def test_restore_table_rest_error(): ) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.copy_backup(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_copy_backup_rest_required_fields( + request_type=bigtable_table_admin.CopyBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request_init["source_backup"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" + jsonified_request["sourceBackup"] = "source_backup_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" + assert "sourceBackup" in jsonified_request + assert jsonified_request["sourceBackup"] == "source_backup_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.copy_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_copy_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.copy_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "backupId", + "sourceBackup", + "expireTime", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_copy_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_copy_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CopyBackupRequest.pb( + bigtable_table_admin.CopyBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.CopyBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.copy_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_copy_backup_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.CopyBackupRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
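
The required-fields test above leans on protobuf's canonical JSON mapping, under which ``backup_id`` and ``source_backup`` become ``backupId`` and ``sourceBackup``. A small sketch of that conversion, assuming a library version that already exposes ``CopyBackupRequest``:

from google.protobuf import json_format
from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

request = bigtable_table_admin.CopyBackupRequest(
    parent="parent_value",
    backup_id="backup_id_value",
    source_backup="source_backup_value",
)
pb_request = bigtable_table_admin.CopyBackupRequest.pb(request)
print(json_format.MessageToJson(pb_request))
# Keys come out in lowerCamelCase: "parent", "backupId", "sourceBackup".
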
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.copy_backup(request) + + +def test_copy_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.copy_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" + % client.transport._host, + args[1], + ) + + +def test_copy_backup_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. 
+ with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +def test_copy_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + @pytest.mark.parametrize( "request_type", [ @@ -14001,6 +14565,7 @@ def test_bigtable_table_admin_base_transport(): "delete_backup", "list_backups", "restore_table", + "copy_backup", "get_iam_policy", "set_iam_policy", "test_iam_permissions", @@ -14377,6 +14942,9 @@ def test_bigtable_table_admin_client_transport_session_collision(transport_name) session1 = client1.transport.restore_table._session session2 = client2.transport.restore_table._session assert session1 != session2 + session1 = client1.transport.copy_backup._session + session2 = client2.transport.copy_backup._session + assert session1 != session2 session1 = client1.transport.get_iam_policy._session session2 = client2.transport.get_iam_policy._session assert session1 != session2 From 09f8a4667d8b68a9f2048ba1aa57db4f775a2c03 Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Tue, 19 Sep 2023 20:59:35 -0400 Subject: [PATCH 38/52] fix: require google-cloud-core 1.4.4 (#866) * fix: require google-cloud-core 1.4.4 * Update sample to address missing dependency on `six` * Update sample to address missing dependency on `six` --- samples/hello_happybase/requirements.txt | 1 + samples/quickstart_happybase/requirements.txt | 1 + setup.py | 2 +- testing/constraints-3.7.txt | 2 +- 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/samples/hello_happybase/requirements.txt b/samples/hello_happybase/requirements.txt index a144f03e1..d3368cd0f 100644 --- a/samples/hello_happybase/requirements.txt +++ b/samples/hello_happybase/requirements.txt @@ -1 +1,2 @@ google-cloud-happybase==0.33.0 +six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 diff --git a/samples/quickstart_happybase/requirements.txt b/samples/quickstart_happybase/requirements.txt index a144f03e1..d3368cd0f 100644 --- a/samples/quickstart_happybase/requirements.txt +++ b/samples/quickstart_happybase/requirements.txt @@ -1 +1,2 @@ google-cloud-happybase==0.33.0 +six==1.16.0 # See https://github.com/googleapis/google-cloud-python-happybase/issues/128 diff --git a/setup.py b/setup.py index 49bb10adc..495730888 100644 --- a/setup.py +++ b/setup.py @@ -38,7 +38,7 @@ release_status = "Development Status :: 5 - Production/Stable" dependencies = [ "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", - "google-cloud-core >= 1.4.1, <3.0.0dev", + "google-cloud-core >= 1.4.4, <3.0.0dev", "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", "proto-plus >= 1.22.0, <2.0.0dev", "proto-plus >= 1.22.2, <2.0.0dev; python_version>='3.11'", diff --git a/testing/constraints-3.7.txt b/testing/constraints-3.7.txt index d14da7c0c..0718fa655 100644 --- a/testing/constraints-3.7.txt +++ b/testing/constraints-3.7.txt @@ -6,7 +6,7 @@ # e.g., if setup.py has "foo >= 1.14.0, < 2.0.0dev", # Then this file should have foo==1.14.0 google-api-core==1.34.0 -google-cloud-core==1.4.1 +google-cloud-core==1.4.4 grpc-google-iam-v1==0.12.4 proto-plus==1.22.0 libcst==0.2.5 From e5af3597f45fc4c094c59abca876374f5a866c1b Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" 
<78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 20 Sep 2023 10:30:37 -0400 Subject: [PATCH 39/52] fix: add feature flag for improved mutate rows throttling (#862) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: Minor formatting chore: Update gapic-generator-python to v1.11.5 build: Update rules_python to 0.24.0 PiperOrigin-RevId: 563436317 Source-Link: https://github.com/googleapis/googleapis/commit/42fd37b18d706f6f51f52f209973b3b2c28f509a Source-Link: https://github.com/googleapis/googleapis-gen/commit/280264ca02fb9316b4237a96d0af1a2343a81a56 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiMjgwMjY0Y2EwMmZiOTMxNmI0MjM3YTk2ZDBhZjFhMjM0M2E4MWE1NiJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * fix: add feature flag for improved mutate rows throttling PiperOrigin-RevId: 565090488 Source-Link: https://github.com/googleapis/googleapis/commit/e8a136feaca2547dd5566ef79841d28f76a80eb5 Source-Link: https://github.com/googleapis/googleapis-gen/commit/9a8dcca0fb2117628a1a6a6c3625a6aa32fc2f75 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOWE4ZGNjYTBmYjIxMTc2MjhhMWE2YTZjMzYyNWE2YWEzMmZjMmY3NSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot Co-authored-by: Anthonios Partheniou --- .../bigtable_instance_admin/async_client.py | 12 +++---- .../bigtable_instance_admin/client.py | 12 +++---- .../transports/rest.py | 2 +- .../bigtable_table_admin/async_client.py | 16 ++++++---- .../services/bigtable_table_admin/client.py | 16 ++++++---- .../bigtable_table_admin/transports/grpc.py | 3 ++ .../transports/grpc_asyncio.py | 3 ++ .../bigtable_table_admin/transports/rest.py | 3 +- .../types/bigtable_table_admin.py | 1 + .../cloud/bigtable_admin_v2/types/instance.py | 4 +-- google/cloud/bigtable_admin_v2/types/table.py | 2 ++ .../services/bigtable/async_client.py | 25 +++++++-------- .../bigtable_v2/services/bigtable/client.py | 25 +++++++-------- .../services/bigtable/transports/grpc.py | 7 ++--- .../bigtable/transports/grpc_asyncio.py | 7 ++--- .../services/bigtable/transports/rest.py | 18 +++++------ google/cloud/bigtable_v2/types/bigtable.py | 12 +++---- google/cloud/bigtable_v2/types/data.py | 13 ++++---- .../cloud/bigtable_v2/types/feature_flags.py | 31 +++++++++++++------ .../cloud/bigtable_v2/types/request_stats.py | 1 + .../test_bigtable_instance_admin.py | 2 +- .../test_bigtable_table_admin.py | 2 +- 22 files changed, 116 insertions(+), 101 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 31e68fe95..3f67620c0 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -1883,8 +1883,8 @@ async def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2001,8 +2001,8 @@ async def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. 
resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2110,8 +2110,8 @@ async def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index d38105a2a..52c61ea4f 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -2107,8 +2107,8 @@ def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2212,8 +2212,8 @@ def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2318,8 +2318,8 @@ def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 25bb0a0b9..9d5502b7e 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -44,8 +44,8 @@ from google.cloud.bigtable_admin_v2.types import instance from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore from .base import ( BigtableInstanceAdminTransport, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index d1c1811a6..d5edeb91d 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -358,6 +358,7 @@ async def create_table_from_snapshot( r"""Creates a new table from the specified snapshot. The target table must not exist. 
The snapshot and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1293,6 +1294,7 @@ async def snapshot_table( r"""Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1469,6 +1471,7 @@ async def get_snapshot( time. A snapshot can be used as a checkpoint for data restoration or a data source for a new table. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud @@ -1668,6 +1671,7 @@ async def delete_snapshot( metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -2435,8 +2439,8 @@ async def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2553,8 +2557,8 @@ async def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2662,8 +2666,8 @@ async def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (:class:`str`): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 80231fce9..d0c04ed11 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -685,6 +685,7 @@ def create_table_from_snapshot( r"""Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1587,6 +1588,7 @@ def snapshot_table( r"""Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -1763,6 +1765,7 @@ def get_snapshot( time. 
A snapshot can be used as a checkpoint for data restoration or a data source for a new table. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud @@ -1942,6 +1945,7 @@ def delete_snapshot( metadata: Sequence[Tuple[str, str]] = (), ) -> None: r"""Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -2690,8 +2694,8 @@ def get_iam_policy( The request object. Request message for ``GetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being requested. - See the operation documentation for the + policy is being requested. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2795,8 +2799,8 @@ def set_iam_policy( The request object. Request message for ``SetIamPolicy`` method. resource (str): REQUIRED: The resource for which the - policy is being specified. - See the operation documentation for the + policy is being specified. See the + operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field @@ -2901,8 +2905,8 @@ def test_iam_permissions( The request object. Request message for ``TestIamPermissions`` method. resource (str): REQUIRED: The resource for which the - policy detail is being requested. - See the operation documentation for the + policy detail is being requested. See + the operation documentation for the appropriate value for this field. This corresponds to the ``resource`` field diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 34a1596fc..d765869cd 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -294,6 +294,7 @@ def create_table_from_snapshot( Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -591,6 +592,7 @@ def snapshot_table( Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -692,6 +694,7 @@ def delete_snapshot( r"""Return a callable for the delete snapshot method over gRPC. Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. 
This feature might be diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index b19d4e7be..b60a7351c 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -302,6 +302,7 @@ def create_table_from_snapshot( Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -607,6 +608,7 @@ def snapshot_table( Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table must be in the same instance. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be @@ -710,6 +712,7 @@ def delete_snapshot( r"""Return a callable for the delete snapshot method over gRPC. Permanently deletes the specified snapshot. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 4b3a846dc..41b893eb7 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -45,8 +45,8 @@ from google.cloud.bigtable_admin_v2.types import table as gba_table from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore from google.protobuf import empty_pb2 # type: ignore +from google.longrunning import operations_pb2 # type: ignore from .base import ( BigtableTableAdminTransport, @@ -2172,6 +2172,7 @@ def __call__( time. A snapshot can be used as a checkpoint for data restoration or a data source for a new table. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index 2b108351a..6a3b31a1e 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -918,6 +918,7 @@ class DeleteSnapshotRequest(proto.Message): class SnapshotTableMetadata(proto.Message): r"""The metadata for the Operation returned by SnapshotTable. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py index aa85eba19..6ae9159d0 100644 --- a/google/cloud/bigtable_admin_v2/types/instance.py +++ b/google/cloud/bigtable_admin_v2/types/instance.py @@ -117,8 +117,8 @@ class Type(proto.Enum): be set on the cluster. 
DEVELOPMENT (2): DEPRECATED: Prefer PRODUCTION for all use - cases, as it no longer enforces - a higher minimum node count than DEVELOPMENT. + cases, as it no longer enforces a higher minimum + node count than DEVELOPMENT. """ TYPE_UNSPECIFIED = 0 PRODUCTION = 1 diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index 6b885203d..57bd1b00f 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -311,6 +311,7 @@ class ColumnFamily(proto.Message): gc_rule (google.cloud.bigtable_admin_v2.types.GcRule): Garbage collection rule specified as a protobuf. Must serialize to at most 500 bytes. + NOTE: Garbage collection executes opportunistically in the background, and so it's possible for reads to return a cell even if it @@ -481,6 +482,7 @@ class Snapshot(proto.Message): r"""A snapshot of a table at a particular time. A snapshot can be used as a checkpoint for data restoration or a data source for a new table. + Note: This is a private alpha release of Cloud Bigtable snapshots. This feature is not currently available to most Cloud Bigtable customers. This feature might be changed in diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 441022ac7..07a782d0c 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -1030,8 +1030,8 @@ def generate_initial_change_stream_partitions( Args: request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (:class:`str`): Required. The unique name of the table from which to get @@ -1061,8 +1061,8 @@ def generate_initial_change_stream_partitions( Returns: AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for + by Apache Beam BigtableIO. Response + message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -1123,17 +1123,15 @@ def read_change_stream( metadata: Sequence[Tuple[str, str]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Reads changes from a table's change stream. Changes will - reflect both user-initiated mutations and mutations that - are caused by garbage collection. + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. Args: request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (:class:`str`): Required. The unique name of the table from which to read a change stream. Values are of the form @@ -1162,9 +1160,8 @@ def read_change_stream( Returns: AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. 
- Response message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index 595310c88..db393faa7 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -1330,8 +1330,8 @@ def generate_initial_change_stream_partitions( Args: request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. table_name (str): Required. The unique name of the table from which to get @@ -1361,8 +1361,8 @@ def generate_initial_change_stream_partitions( Returns: Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for + by Apache Beam BigtableIO. Response + message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -1427,17 +1427,15 @@ def read_change_stream( metadata: Sequence[Tuple[str, str]] = (), ) -> Iterable[bigtable.ReadChangeStreamResponse]: r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Reads changes from a table's change stream. Changes will - reflect both user-initiated mutations and mutations that - are caused by garbage collection. + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. Args: request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. table_name (str): Required. The unique name of the table from which to read a change stream. Values are of the form @@ -1466,9 +1464,8 @@ def read_change_stream( Returns: Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. """ # Create or coerce a protobuf request object. diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 0e0666242..8ba04e761 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -481,10 +481,9 @@ def read_change_stream( r"""Return a callable for the read change stream method over gRPC. NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Reads changes from a table's change stream. Changes will - reflect both user-initiated mutations and mutations that - are caused by garbage collection. + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. 
Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 49259969b..2c0cbdad6 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -490,10 +490,9 @@ def read_change_stream( r"""Return a callable for the read change stream method over gRPC. NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Reads changes from a table's change stream. Changes will - reflect both user-initiated mutations and mutations that - are caused by garbage collection. + BigtableIO. Reads changes from a table's change stream. + Changes will reflect both user-initiated mutations and + mutations that are caused by garbage collection. Returns: Callable[[~.ReadChangeStreamRequest], diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 066b35e2a..31d230f94 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -571,8 +571,8 @@ def __call__( Args: request (~.bigtable.GenerateInitialChangeStreamPartitionsRequest): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for + by Apache Beam BigtableIO. Request + message for Bigtable.GenerateInitialChangeStreamPartitions. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. @@ -583,8 +583,8 @@ def __call__( Returns: ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for + by Apache Beam BigtableIO. Response + message for Bigtable.GenerateInitialChangeStreamPartitions. """ @@ -975,9 +975,8 @@ def __call__( Args: request (~.bigtable.ReadChangeStreamRequest): The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Request message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Request + message for Bigtable.ReadChangeStream. retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. @@ -987,9 +986,8 @@ def __call__( Returns: ~.bigtable.ReadChangeStreamResponse: NOTE: This API is intended to be used - by Apache Beam BigtableIO. - Response message for - Bigtable.ReadChangeStream. + by Apache Beam BigtableIO. Response + message for Bigtable.ReadChangeStream. """ diff --git a/google/cloud/bigtable_v2/types/bigtable.py b/google/cloud/bigtable_v2/types/bigtable.py index 42740eee2..57f806408 100644 --- a/google/cloud/bigtable_v2/types/bigtable.py +++ b/google/cloud/bigtable_v2/types/bigtable.py @@ -756,8 +756,7 @@ class ReadModifyWriteRowResponse(proto.Message): class GenerateInitialChangeStreamPartitionsRequest(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Request message for + BigtableIO. Request message for Bigtable.GenerateInitialChangeStreamPartitions. Attributes: @@ -785,8 +784,7 @@ class GenerateInitialChangeStreamPartitionsRequest(proto.Message): class GenerateInitialChangeStreamPartitionsResponse(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Response message for + BigtableIO. 
Response message for Bigtable.GenerateInitialChangeStreamPartitions. Attributes: @@ -803,8 +801,7 @@ class GenerateInitialChangeStreamPartitionsResponse(proto.Message): class ReadChangeStreamRequest(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Request message for Bigtable.ReadChangeStream. + BigtableIO. Request message for Bigtable.ReadChangeStream. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. @@ -900,8 +897,7 @@ class ReadChangeStreamRequest(proto.Message): class ReadChangeStreamResponse(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - Response message for Bigtable.ReadChangeStream. + BigtableIO. Response message for Bigtable.ReadChangeStream. This message has `oneof`_ fields (mutually exclusive fields). For each oneof, at most one member field can be set at the same time. diff --git a/google/cloud/bigtable_v2/types/data.py b/google/cloud/bigtable_v2/types/data.py index 0e9e0bfe5..e37644a76 100644 --- a/google/cloud/bigtable_v2/types/data.py +++ b/google/cloud/bigtable_v2/types/data.py @@ -639,11 +639,10 @@ class Chain(proto.Message): Attributes: filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): The elements of "filters" are chained - together to process the input row: - - in row -> f(0) -> intermediate row -> f(1) -> - ... -> f(N) -> out row The full chain is - executed atomically. + together to process the input row: in row -> + f(0) -> intermediate row -> f(1) -> ... -> f(N) + -> out row The full chain is executed + atomically. """ filters: MutableSequence["RowFilter"] = proto.RepeatedField( @@ -698,6 +697,7 @@ class Condition(proto.Message): r"""A RowFilter which evaluates one of two possible RowFilters, depending on whether or not a predicate RowFilter outputs any cells from the input row. + IMPORTANT NOTE: The predicate filter does not execute atomically with the true and false filters, which may lead to inconsistent or unexpected results. Additionally, Condition filters have poor @@ -1042,8 +1042,7 @@ class ReadModifyWriteRule(proto.Message): class StreamPartition(proto.Message): r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. - A partition of a change stream. + BigtableIO. A partition of a change stream. Attributes: row_range (google.cloud.bigtable_v2.types.RowRange): diff --git a/google/cloud/bigtable_v2/types/feature_flags.py b/google/cloud/bigtable_v2/types/feature_flags.py index c7a64db5e..92ac5023d 100644 --- a/google/cloud/bigtable_v2/types/feature_flags.py +++ b/google/cloud/bigtable_v2/types/feature_flags.py @@ -29,14 +29,14 @@ class FeatureFlags(proto.Message): - r"""Feature flags supported by a client. This is intended to be sent as - part of request metadata to assure the server that certain behaviors - are safe to enable. This proto is meant to be serialized and - websafe-base64 encoded under the ``bigtable-features`` metadata key. - The value will remain constant for the lifetime of a client and due - to HTTP2's HPACK compression, the request overhead will be tiny. - This is an internal implementation detail and should not be used by - endusers directly. + r"""Feature flags supported or enabled by a client. This is intended to + be sent as part of request metadata to assure the server that + certain behaviors are safe to enable. This proto is meant to be + serialized and websafe-base64 encoded under the + ``bigtable-features`` metadata key. 
The value will remain constant + for the lifetime of a client and due to HTTP2's HPACK compression, + the request overhead will be tiny. This is an internal + implementation detail and should not be used by end users directly. Attributes: reverse_scans (bool): @@ -47,11 +47,18 @@ class FeatureFlags(proto.Message): mutate_rows_rate_limit (bool): Notify the server that the client enables batch write flow control by requesting - RateLimitInfo from MutateRowsResponse. + RateLimitInfo from MutateRowsResponse. Due to + technical reasons, this disables partial + retries. + mutate_rows_rate_limit2 (bool): + Notify the server that the client enables + batch write flow control by requesting + RateLimitInfo from MutateRowsResponse. With + partial retries enabled. last_scanned_row_responses (bool): Notify the server that the client supports the last_scanned_row field in ReadRowsResponse for long-running - sparse scans. + scans. """ reverse_scans: bool = proto.Field( @@ -62,6 +69,10 @@ class FeatureFlags(proto.Message): proto.BOOL, number=3, ) + mutate_rows_rate_limit2: bool = proto.Field( + proto.BOOL, + number=5, + ) last_scanned_row_responses: bool = proto.Field( proto.BOOL, number=4, diff --git a/google/cloud/bigtable_v2/types/request_stats.py b/google/cloud/bigtable_v2/types/request_stats.py index 27c2bb028..61cce9491 100644 --- a/google/cloud/bigtable_v2/types/request_stats.py +++ b/google/cloud/bigtable_v2/types/request_stats.py @@ -86,6 +86,7 @@ class RequestLatencyStats(proto.Message): response. For more context on the component that is measuring this latency, see: https://cloud.google.com/bigtable/docs/overview + Note: This value may be slightly shorter than the value reported into aggregate latency metrics in Monitoring for this request diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index f5f5fc514..b2caa98ba 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -63,7 +63,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account from google.protobuf import field_mask_pb2 # type: ignore from google.protobuf import timestamp_pb2 # type: ignore diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index a537cb9f0..aa717a3cb 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -62,7 +62,7 @@ from google.iam.v1 import iam_policy_pb2 # type: ignore from google.iam.v1 import options_pb2 # type: ignore from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 +from google.longrunning import operations_pb2 # type: ignore from google.oauth2 import service_account from google.protobuf import any_pb2 # type: ignore from google.protobuf import duration_pb2 # type: ignore From a4d551e34006202ee96a395a2107d7acdc5881de Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 21:31:32 -0400 Subject: [PATCH 40/52] feat: Add support for Cloud Bigtable Request 
Priorities in App Profiles (#871) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Add support for Cloud Bigtable Request Priorities in App Profiles PiperOrigin-RevId: 571158646 Source-Link: https://github.com/googleapis/googleapis/commit/bc3c83b41b1589cca21f713a500f179ef86a7e18 Source-Link: https://github.com/googleapis/googleapis-gen/commit/93366e84e4e6861e2e580eb000721d99bf54a0a4 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiOTMzNjZlODRlNGU2ODYxZTJlNTgwZWIwMDA3MjFkOTliZjU0YTBhNCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../cloud/bigtable_admin_v2/types/instance.py | 65 ++++++++++++++++++- .../test_bigtable_instance_admin.py | 12 ++++ 2 files changed, 76 insertions(+), 1 deletion(-) diff --git a/google/cloud/bigtable_admin_v2/types/instance.py b/google/cloud/bigtable_admin_v2/types/instance.py index 6ae9159d0..78efd711b 100644 --- a/google/cloud/bigtable_admin_v2/types/instance.py +++ b/google/cloud/bigtable_admin_v2/types/instance.py @@ -173,7 +173,7 @@ class AutoscalingTargets(proto.Message): The storage utilization that the Autoscaler should be trying to achieve. This number is limited between 2560 (2.5TiB) and 5120 (5TiB) for a SSD cluster and between 8192 (8TiB) and - 16384 (16TiB) for an HDD cluster; otherwise it will return + 16384 (16TiB) for an HDD cluster, otherwise it will return INVALID_ARGUMENT error. If this value is set to 0, it will be treated as if it were set to the default value: 2560 for SSD, 8192 for HDD. @@ -419,8 +419,43 @@ class AppProfile(proto.Message): Use a single-cluster routing policy. This field is a member of `oneof`_ ``routing_policy``. + priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): + This field has been deprecated in favor of + ``standard_isolation.priority``. If you set this field, + ``standard_isolation.priority`` will be set instead. + + The priority of requests sent using this app profile. + + This field is a member of `oneof`_ ``isolation``. + standard_isolation (google.cloud.bigtable_admin_v2.types.AppProfile.StandardIsolation): + The standard options used for isolating this + app profile's traffic from other use cases. + + This field is a member of `oneof`_ ``isolation``. """ + class Priority(proto.Enum): + r"""Possible priorities for an app profile. Note that higher + priority writes can sometimes queue behind lower priority writes + to the same tablet, as writes must be strictly sequenced in the + durability log. + + Values: + PRIORITY_UNSPECIFIED (0): + Default value. Mapped to PRIORITY_HIGH (the legacy behavior) + on creation. + PRIORITY_LOW (1): + No description available. + PRIORITY_MEDIUM (2): + No description available. + PRIORITY_HIGH (3): + No description available. + """ + PRIORITY_UNSPECIFIED = 0 + PRIORITY_LOW = 1 + PRIORITY_MEDIUM = 2 + PRIORITY_HIGH = 3 + class MultiClusterRoutingUseAny(proto.Message): r"""Read/write requests are routed to the nearest cluster in the instance, and will fail over to the nearest cluster that is @@ -466,6 +501,22 @@ class SingleClusterRouting(proto.Message): number=2, ) + class StandardIsolation(proto.Message): + r"""Standard options for isolating this app profile's traffic + from other use cases. + + Attributes: + priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): + The priority of requests sent using this app + profile. 
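
Putting the new isolation fields together: a sketch of an app profile that routes requests at low priority via ``standard_isolation``, the preferred field now that the top-level ``priority`` is documented as deprecated on write. Resource names are placeholders:

# Hedged sketch built from the types this diff adds; not taken verbatim
# from the patch.
from google.cloud.bigtable_admin_v2.types import instance

app_profile = instance.AppProfile(
    name="projects/my-project/instances/my-instance/appProfiles/low-pri",
    standard_isolation=instance.AppProfile.StandardIsolation(
        priority=instance.AppProfile.Priority.PRIORITY_LOW,
    ),
)
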
+ """ + + priority: "AppProfile.Priority" = proto.Field( + proto.ENUM, + number=1, + enum="AppProfile.Priority", + ) + name: str = proto.Field( proto.STRING, number=1, @@ -490,6 +541,18 @@ class SingleClusterRouting(proto.Message): oneof="routing_policy", message=SingleClusterRouting, ) + priority: Priority = proto.Field( + proto.ENUM, + number=7, + oneof="isolation", + enum=Priority, + ) + standard_isolation: StandardIsolation = proto.Field( + proto.MESSAGE, + number=11, + oneof="isolation", + message=StandardIsolation, + ) class HotTablet(proto.Message): diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index b2caa98ba..b8508cab4 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -3524,6 +3524,7 @@ def test_create_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) response = client.create_app_profile(request) @@ -3793,6 +3794,7 @@ def test_get_app_profile(request_type, transport: str = "grpc"): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) response = client.get_app_profile(request) @@ -9323,6 +9325,8 @@ def test_create_app_profile_rest(request_type): "cluster_id": "cluster_id_value", "allow_transactional_writes": True, }, + "priority": 1, + "standard_isolation": {"priority": 1}, } request = request_type(**request_init) @@ -9333,6 +9337,7 @@ def test_create_app_profile_rest(request_type): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) # Wrap the value into a proper Response obj @@ -9550,6 +9555,8 @@ def test_create_app_profile_rest_bad_request( "cluster_id": "cluster_id_value", "allow_transactional_writes": True, }, + "priority": 1, + "standard_isolation": {"priority": 1}, } request = request_type(**request_init) @@ -9655,6 +9662,7 @@ def test_get_app_profile_rest(request_type): name="name_value", etag="etag_value", description="description_value", + priority=instance.AppProfile.Priority.PRIORITY_LOW, ) # Wrap the value into a proper Response obj @@ -10283,6 +10291,8 @@ def test_update_app_profile_rest(request_type): "cluster_id": "cluster_id_value", "allow_transactional_writes": True, }, + "priority": 1, + "standard_isolation": {"priority": 1}, } request = request_type(**request_init) @@ -10489,6 +10499,8 @@ def test_update_app_profile_rest_bad_request( "cluster_id": "cluster_id_value", "allow_transactional_writes": True, }, + "priority": 1, + "standard_isolation": {"priority": 1}, } request = request_type(**request_init) From a1cef05f2f6f7cb2c0d87364db2d9a8d3d19dd8b Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Fri, 6 Oct 2023 21:36:09 -0400 Subject: [PATCH 41/52] chore: [autoapprove] bump cryptography from 41.0.3 to 41.0.4 (#868) Source-Link: https://github.com/googleapis/synthtool/commit/dede53ff326079b457cfb1aae5bbdc82cbb51dc3 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:fac304457974bb530cc5396abd4ab25d26a469cd3bc97cbfb18c8d4324c584eb Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .gitignore | 1 + .kokoro/requirements.txt | 49 ++++++++++++++++++++------------------- 3 files changed, 28 
insertions(+), 26 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index a3da1b0d4..a9bdb1b7a 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:3e3800bb100af5d7f9e810d48212b37812c1856d20ffeafb99ebe66461b61fc7 -# created: 2023-08-02T10:53:29.114535628Z + digest: sha256:fac304457974bb530cc5396abd4ab25d26a469cd3bc97cbfb18c8d4324c584eb +# created: 2023-10-02T21:31:03.517640371Z diff --git a/.gitignore b/.gitignore index b4243ced7..d083ea1dd 100644 --- a/.gitignore +++ b/.gitignore @@ -50,6 +50,7 @@ docs.metadata # Virtual environment env/ +venv/ # Test logs coverage.xml diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 029bd342d..96d593c8c 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -113,30 +113,30 @@ commonmark==0.9.1 \ --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==41.0.3 \ - --hash=sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306 \ - --hash=sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84 \ - --hash=sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47 \ - --hash=sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d \ - --hash=sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116 \ - --hash=sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207 \ - --hash=sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81 \ - --hash=sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087 \ - --hash=sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd \ - --hash=sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507 \ - --hash=sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858 \ - --hash=sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae \ - --hash=sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34 \ - --hash=sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906 \ - --hash=sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd \ - --hash=sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922 \ - --hash=sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7 \ - --hash=sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4 \ - --hash=sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574 \ - --hash=sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1 \ - --hash=sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c \ - --hash=sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e \ - --hash=sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de +cryptography==41.0.4 \ + --hash=sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67 \ + --hash=sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311 \ + --hash=sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8 \ + --hash=sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13 \ + --hash=sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143 \ + 
--hash=sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f \ + --hash=sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829 \ + --hash=sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd \ + --hash=sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397 \ + --hash=sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac \ + --hash=sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d \ + --hash=sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a \ + --hash=sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839 \ + --hash=sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e \ + --hash=sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6 \ + --hash=sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9 \ + --hash=sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860 \ + --hash=sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca \ + --hash=sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91 \ + --hash=sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d \ + --hash=sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714 \ + --hash=sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb \ + --hash=sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f # via # gcp-releasetool # secretstorage @@ -382,6 +382,7 @@ protobuf==3.20.3 \ # gcp-docuploader # gcp-releasetool # google-api-core + # googleapis-common-protos pyasn1==0.4.8 \ --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba From cf1a1466553bf3bcc5a571e7d79191ab7dde6a71 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 9 Oct 2023 11:05:02 -0400 Subject: [PATCH 42/52] chore: [autoapprove] Update `black` and `isort` to latest versions (#873) Source-Link: https://github.com/googleapis/synthtool/commit/0c7b0333f44b2b7075447f43a121a12d15a7b76a Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:08e34975760f002746b1d8c86fdc90660be45945ee6d9db914d1508acdf9a547 Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .kokoro/requirements.txt | 6 ++--- .pre-commit-config.yaml | 2 +- docs/snippets.py | 1 - docs/snippets_table.py | 1 - noxfile.py | 36 +++++++++++++++-------------- tests/system/test_instance_admin.py | 1 - tests/unit/test_batcher.py | 6 ----- tests/unit/test_cluster.py | 4 ---- tests/unit/test_column_family.py | 1 - tests/unit/test_instance.py | 1 - tests/unit/test_row_data.py | 2 -- tests/unit/test_table.py | 1 - 13 files changed, 25 insertions(+), 41 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index a9bdb1b7a..dd98abbde 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:fac304457974bb530cc5396abd4ab25d26a469cd3bc97cbfb18c8d4324c584eb -# created: 2023-10-02T21:31:03.517640371Z + digest: sha256:08e34975760f002746b1d8c86fdc90660be45945ee6d9db914d1508acdf9a547 +# created: 2023-10-09T14:06:13.397766266Z diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 96d593c8c..0332d3267 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -467,9 +467,9 @@ typing-extensions==4.4.0 \ --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e # via -r requirements.in -urllib3==1.26.12 \ - --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \ - --hash=sha256:b930dd878d5a8afb066a637fbb35144fe7901e3b209d1cd4f524bd0e9deee997 +urllib3==1.26.17 \ + --hash=sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21 \ + --hash=sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b # via # requests # twine diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 19409cbd3..6a8e16950 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -22,7 +22,7 @@ repos: - id: end-of-file-fixer - id: check-yaml - repo: https://github.com/psf/black - rev: 22.3.0 + rev: 23.7.0 hooks: - id: black - repo: https://github.com/pycqa/flake8 diff --git a/docs/snippets.py b/docs/snippets.py index 1d93fdf12..fa3aa3627 100644 --- a/docs/snippets.py +++ b/docs/snippets.py @@ -448,7 +448,6 @@ def test_bigtable_create_table(): def test_bigtable_list_tables(): - # [START bigtable_api_list_tables] from google.cloud.bigtable import Client diff --git a/docs/snippets_table.py b/docs/snippets_table.py index f27260425..893135275 100644 --- a/docs/snippets_table.py +++ b/docs/snippets_table.py @@ -964,7 +964,6 @@ def test_bigtable_create_family_gc_nested(): def test_bigtable_row_data_cells_cell_value_cell_values(): - value = b"value_in_col1" row = Config.TABLE.row(b"row_key_1") row.set_cell( diff --git a/noxfile.py b/noxfile.py index b2820b309..456191016 100644 --- a/noxfile.py +++ b/noxfile.py @@ -17,22 +17,24 @@ # Generated by synthtool. DO NOT EDIT! 
from __future__ import absolute_import + import os import pathlib import re import shutil +from typing import Dict, List import warnings import nox FLAKE8_VERSION = "flake8==6.1.0" -BLACK_VERSION = "black==22.3.0" -ISORT_VERSION = "isort==5.10.1" +BLACK_VERSION = "black[jupyter]==23.7.0" +ISORT_VERSION = "isort==5.11.0" LINT_PATHS = ["docs", "google", "tests", "noxfile.py", "setup.py"] DEFAULT_PYTHON_VERSION = "3.8" -UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11"] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", @@ -40,23 +42,23 @@ "pytest-cov", "pytest-asyncio", ] -UNIT_TEST_EXTERNAL_DEPENDENCIES = [] -UNIT_TEST_LOCAL_DEPENDENCIES = [] -UNIT_TEST_DEPENDENCIES = [] -UNIT_TEST_EXTRAS = [] -UNIT_TEST_EXTRAS_BY_PYTHON = {} - -SYSTEM_TEST_PYTHON_VERSIONS = ["3.8"] -SYSTEM_TEST_STANDARD_DEPENDENCIES = [ +UNIT_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_LOCAL_DEPENDENCIES: List[str] = [] +UNIT_TEST_DEPENDENCIES: List[str] = [] +UNIT_TEST_EXTRAS: List[str] = [] +UNIT_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} + +SYSTEM_TEST_PYTHON_VERSIONS: List[str] = ["3.8"] +SYSTEM_TEST_STANDARD_DEPENDENCIES: List[str] = [ "mock", "pytest", "google-cloud-testutils", ] -SYSTEM_TEST_EXTERNAL_DEPENDENCIES = [] -SYSTEM_TEST_LOCAL_DEPENDENCIES = [] -SYSTEM_TEST_DEPENDENCIES = [] -SYSTEM_TEST_EXTRAS = [] -SYSTEM_TEST_EXTRAS_BY_PYTHON = {} +SYSTEM_TEST_EXTERNAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_LOCAL_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_DEPENDENCIES: List[str] = [] +SYSTEM_TEST_EXTRAS: List[str] = [] +SYSTEM_TEST_EXTRAS_BY_PYTHON: Dict[str, List[str]] = {} CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() @@ -71,6 +73,7 @@ "lint_setup_py", "blacken", "docs", + "format", ] # Error if a python version is missing @@ -201,7 +204,6 @@ def unit(session): def install_systemtest_dependencies(session, *constraints): - # Use pre-release gRPC for system tests. # Exclude version 1.52.0rc1 which has a known issue. 
# See https://github.com/grpc/grpc/issues/32163 diff --git a/tests/system/test_instance_admin.py b/tests/system/test_instance_admin.py index e5e311213..bd5c7e912 100644 --- a/tests/system/test_instance_admin.py +++ b/tests/system/test_instance_admin.py @@ -28,7 +28,6 @@ def _create_app_profile_helper( allow_transactional_writes=None, ignore_warnings=None, ): - app_profile = instance.app_profile( app_profile_id=app_profile_id, routing_policy_type=routing_policy_type, diff --git a/tests/unit/test_batcher.py b/tests/unit/test_batcher.py index 998748141..741d9f282 100644 --- a/tests/unit/test_batcher.py +++ b/tests/unit/test_batcher.py @@ -59,7 +59,6 @@ def callback_fn(response): def test_mutation_batcher_mutate_row(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: - rows = [ DirectRow(row_key=b"row_key"), DirectRow(row_key=b"row_key_2"), @@ -75,7 +74,6 @@ def test_mutation_batcher_mutate_row(): def test_mutation_batcher_mutate(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", 1) row.set_cell("cf1", b"c2", 2) @@ -98,7 +96,6 @@ def test_mutation_batcher_flush_w_no_rows(): def test_mutation_batcher_mutate_w_max_flush_count(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table, flush_count=3) as mutation_batcher: - row_1 = DirectRow(row_key=b"row_key_1") row_2 = DirectRow(row_key=b"row_key_2") row_3 = DirectRow(row_key=b"row_key_3") @@ -114,7 +111,6 @@ def test_mutation_batcher_mutate_w_max_flush_count(): def test_mutation_batcher_mutate_w_max_mutations(): table = _Table(TABLE_NAME) with MutationsBatcher(table=table) as mutation_batcher: - row = DirectRow(row_key=b"row_key") row.set_cell("cf1", b"c1", 1) row.set_cell("cf1", b"c2", 2) @@ -130,7 +126,6 @@ def test_mutation_batcher_mutate_w_max_row_bytes(): with MutationsBatcher( table=table, max_row_bytes=3 * 1024 * 1024 ) as mutation_batcher: - number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes @@ -168,7 +163,6 @@ def test_mutations_batcher_context_manager_flushed_when_closed(): with MutationsBatcher( table=table, max_row_bytes=3 * 1024 * 1024 ) as mutation_batcher: - number_of_bytes = 1 * 1024 * 1024 max_value = b"1" * number_of_bytes diff --git a/tests/unit/test_cluster.py b/tests/unit/test_cluster.py index cb0312b0c..65ed47437 100644 --- a/tests/unit/test_cluster.py +++ b/tests/unit/test_cluster.py @@ -752,7 +752,6 @@ def test_cluster_update_w_partial_autoscaling_config(): }, ] for config in cluster_config: - cluster = _make_cluster( CLUSTER_ID, instance, @@ -927,7 +926,6 @@ def test_cluster_disable_autoscaling(): def test_create_cluster_with_both_manual_and_autoscaling(): - from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.enums import StorageType @@ -955,7 +953,6 @@ def test_create_cluster_with_both_manual_and_autoscaling(): def test_create_cluster_with_partial_autoscaling_config(): - from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.enums import StorageType @@ -996,7 +993,6 @@ def test_create_cluster_with_partial_autoscaling_config(): def test_create_cluster_with_no_scaling_config(): - from google.cloud.bigtable.instance import Instance from google.cloud.bigtable.enums import StorageType diff --git a/tests/unit/test_column_family.py b/tests/unit/test_column_family.py index b464024a7..80b05d744 100644 --- a/tests/unit/test_column_family.py +++ b/tests/unit/test_column_family.py @@ -595,7 +595,6 @@ def 
test__gc_rule_from_pb_unknown_field_name(): from google.cloud.bigtable.column_family import _gc_rule_from_pb class MockProto(object): - names = [] _pb = {} diff --git a/tests/unit/test_instance.py b/tests/unit/test_instance.py index c577adca5..797e4bd9c 100644 --- a/tests/unit/test_instance.py +++ b/tests/unit/test_instance.py @@ -67,7 +67,6 @@ def _make_instance(*args, **kwargs): def test_instance_constructor_defaults(): - client = object() instance = _make_instance(INSTANCE_ID, client) assert instance.instance_id == INSTANCE_ID diff --git a/tests/unit/test_row_data.py b/tests/unit/test_row_data.py index fba69ceba..9f2c40a54 100644 --- a/tests/unit/test_row_data.py +++ b/tests/unit/test_row_data.py @@ -1118,7 +1118,6 @@ def test_RRRM_build_updated_request_row_ranges_valid(): class _MockCancellableIterator(object): - cancel_calls = 0 def __init__(self, *values): @@ -1199,5 +1198,4 @@ def _read_rows_retry_exception(exc): class _Client(object): - data_stub = None diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index 3d7d2e8ee..f2dc14485 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -1689,7 +1689,6 @@ def _do_mutate_retryable_rows_helper( expected_entries = [] for row, prior_status in zip(rows, worker.responses_statuses): - if prior_status is None or prior_status.code in RETRYABLES: mutations = row._get_mutations().copy() # row clears on success entry = data_messages_v2_pb2.MutateRowsRequest.Entry( From 6070a22ffe55abc7ca6a724ab17409fc34d0a4f9 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Sat, 28 Oct 2023 00:15:28 +0000 Subject: [PATCH 43/52] chore: Update gapic-generator-python to v1.11.9 (#874) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore: Update gapic-generator-python to v1.11.7 PiperOrigin-RevId: 573230664 Source-Link: https://github.com/googleapis/googleapis/commit/93beed334607e70709cc60e6145be65fdc8ec386 Source-Link: https://github.com/googleapis/googleapis-gen/commit/f4a4edaa8057639fcf6adf9179872280d1a8f651 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZjRhNGVkYWE4MDU3NjM5ZmNmNmFkZjkxNzk4NzIyODBkMWE4ZjY1MSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.11.8 PiperOrigin-RevId: 574178735 Source-Link: https://github.com/googleapis/googleapis/commit/7307199008ee2d57a4337066de29f9cd8c444bc6 Source-Link: https://github.com/googleapis/googleapis-gen/commit/ce3af21b7c559a87c2befc076be0e3aeda3a26f0 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiY2UzYWYyMWI3YzU1OWE4N2MyYmVmYzA3NmJlMGUzYWVkYTNhMjZmMCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * chore: Update gapic-generator-python to v1.11.9 PiperOrigin-RevId: 574520922 Source-Link: https://github.com/googleapis/googleapis/commit/5183984d611beb41e90f65f08609b9d926f779bd Source-Link: https://github.com/googleapis/googleapis-gen/commit/a59af19d4ac6509faedf1cc39029141b6a5b8968 Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiYTU5YWYxOWQ0YWM2NTA5ZmFlZGYxY2MzOTAyOTE0MWI2YTViODk2OCJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- .../test_bigtable_instance_admin.py | 587 +++++++++++++----- .../test_bigtable_table_admin.py | 461 
+++++++++----- tests/unit/gapic/bigtable_v2/test_bigtable.py | 135 ++-- 3 files changed, 820 insertions(+), 363 deletions(-) diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index b8508cab4..ddbf0032f 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -6454,8 +6454,9 @@ def test_get_instance_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6534,8 +6535,9 @@ def test_get_instance_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6658,8 +6660,9 @@ def test_get_instance_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6723,8 +6726,9 @@ def test_list_instances_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6804,10 +6808,11 @@ def test_list_instances_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListInstancesResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -6934,8 +6939,9 @@ def test_list_instances_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = 
bigtable_instance_admin.ListInstancesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7002,8 +7008,9 @@ def test_update_instance_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7081,8 +7088,9 @@ def test_update_instance_rest_required_fields(request_type=instance.Instance): response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Instance.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7210,6 +7218,75 @@ def test_partial_update_instance_rest(request_type): "create_time": {"seconds": 751, "nanos": 543}, "satisfies_pzs": True, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields[ + "instance" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["instance"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["instance"][field])): + del request_init["instance"][field][i][subfield] + else: + del request_init["instance"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -7390,15 +7467,6 @@ def test_partial_update_instance_rest_bad_request( # send a request that will satisfy transcoding request_init = {"instance": {"name": "projects/sample1/instances/sample2"}} - request_init["instance"] = { - "name": "projects/sample1/instances/sample2", - "display_name": "display_name_value", - "state": 1, - "type_": 1, - "labels": {}, - "create_time": {"seconds": 751, "nanos": 543}, - "satisfies_pzs": True, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -7761,6 +7829,73 @@ def test_create_cluster_rest(request_type): "default_storage_type": 1, "encryption_config": {"kms_key_name": "kms_key_name_value"}, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -7959,26 +8094,6 @@ def test_create_cluster_rest_bad_request( # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["cluster"] = { - "name": "name_value", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -8088,8 +8203,9 @@ def test_get_cluster_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8168,8 +8284,9 @@ def test_get_cluster_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8292,8 +8409,9 @@ def test_get_cluster_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.Cluster.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8358,8 +8476,9 @@ def test_list_clusters_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8439,10 +8558,9 @@ def test_list_clusters_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListClustersResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8569,8 +8687,9 @@ def test_list_clusters_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8766,6 +8885,75 @@ def test_partial_update_cluster_rest(request_type): "default_storage_type": 1, "encryption_config": {"kms_key_name": "kms_key_name_value"}, } + # The version of a generated dependency at test runtime may differ from the 
version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields[ + "cluster" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["cluster"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["cluster"][field])): + del request_init["cluster"][field][i][subfield] + else: + del request_init["cluster"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -8948,26 +9136,6 @@ def test_partial_update_cluster_rest_bad_request( request_init = { "cluster": {"name": "projects/sample1/instances/sample2/clusters/sample3"} } - request_init["cluster"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3", - "location": "location_value", - "state": 1, - "serve_nodes": 1181, - "cluster_config": { - "cluster_autoscaling_config": { - "autoscaling_limits": { - "min_serve_nodes": 1600, - "max_serve_nodes": 1602, - }, - "autoscaling_targets": { - "cpu_utilization_percent": 2483, - "storage_utilization_gib_per_node": 3404, - }, - } - }, - "default_storage_type": 1, - "encryption_config": {"kms_key_name": "kms_key_name_value"}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -9328,6 +9496,75 @@ def test_create_app_profile_rest(request_type): "priority": 1, "standard_isolation": {"priority": 1}, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields[ + "app_profile" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
@@ -9343,8 +9580,9 @@ def test_create_app_profile_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9436,8 +9674,9 @@ def test_create_app_profile_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9544,20 +9783,6 @@ def test_create_app_profile_rest_bad_request( # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2"} - request_init["app_profile"] = { - "name": "name_value", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9597,8 +9822,9 @@ def test_create_app_profile_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9668,8 +9894,9 @@ def test_get_app_profile_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9746,8 +9973,9 @@ def test_get_app_profile_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9872,8 +10100,9 @@ def test_get_app_profile_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = 
instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = instance.AppProfile.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9939,10 +10168,9 @@ def test_list_app_profiles_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10025,10 +10253,11 @@ def test_list_app_profiles_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10163,10 +10392,9 @@ def test_list_app_profiles_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListAppProfilesResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10294,6 +10522,75 @@ def test_update_app_profile_rest(request_type): "priority": 1, "standard_isolation": {"priority": 1}, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields[ + "app_profile" + ] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["app_profile"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["app_profile"][field])): + del request_init["app_profile"][field][i][subfield] + else: + del request_init["app_profile"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -10488,20 +10785,6 @@ def test_update_app_profile_rest_bad_request( "name": "projects/sample1/instances/sample2/appProfiles/sample3" } } - request_init["app_profile"] = { - "name": "projects/sample1/instances/sample2/appProfiles/sample3", - "etag": "etag_value", - "description": "description_value", - "multi_cluster_routing_use_any": { - "cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"] - }, - "single_cluster_routing": { - "cluster_id": "cluster_id_value", - "allow_transactional_writes": True, - }, - "priority": 1, - "standard_isolation": {"priority": 1}, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
@@ -10890,8 +11173,7 @@ def test_get_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10968,8 +11250,7 @@ def test_get_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11090,8 +11371,7 @@ def test_get_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11157,8 +11437,7 @@ def test_set_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11235,8 +11514,7 @@ def test_set_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11365,8 +11643,7 @@ def test_set_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11431,8 +11708,7 @@ def test_test_iam_permissions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11512,8 +11788,7 @@ def test_test_iam_permissions_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11645,8 +11920,7 @@ def test_test_iam_permissions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = 
json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11712,10 +11986,9 @@ def test_list_hot_tablets_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11799,10 +12072,11 @@ def test_list_hot_tablets_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11941,10 +12215,9 @@ def test_list_hot_tablets_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_instance_admin.ListHotTabletsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index aa717a3cb..b29dc5106 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -7285,8 +7285,9 @@ def test_create_table_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7368,8 +7369,9 @@ def test_create_table_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7503,8 +7505,9 @@ def test_create_table_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = gba_table.Table.pb(return_value) - json_return_value 
= json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = gba_table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7856,8 +7859,9 @@ def test_list_tables_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -7940,8 +7944,9 @@ def test_list_tables_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8075,8 +8080,9 @@ def test_list_tables_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8199,8 +8205,9 @@ def test_get_table_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8279,8 +8286,9 @@ def test_get_table_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8403,8 +8411,9 @@ def test_get_table_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -8476,6 +8485,73 @@ def 
test_update_table_rest(request_type): "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, "deletion_protection": True, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. + message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["table"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["table"][field])): + del request_init["table"][field][i][subfield] + else: + del request_init["table"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
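
The block inserted above guards the test against version skew between the protobuf runtime present at test time and the one used when the test was generated: any subfield of the sample request that the runtime message type does not know about is deleted before the request object is built. A minimal standalone sketch of the same pruning idea, with a hypothetical set of (field, subfield) pairs standing in for the proto-plus `.meta.fields` introspection:

# Hypothetical stand-in for the pairs discovered via proto-plus metadata.
runtime_nested_fields = {("restore_info", "source_type"), ("restore_info", "backup_info")}

request_init = {
    "table": {
        "name": "projects/sample1/instances/sample2/tables/sample3",
        "restore_info": {"source_type": 1, "field_not_in_runtime": "x"},
    }
}

# Collect (field, subfield) pairs in the sample request that the runtime lacks.
subfields_not_in_runtime = []
for field, value in request_init["table"].items():
    result = value[0] if isinstance(value, list) and value else value
    if isinstance(result, dict):
        for subfield in result:
            if (field, subfield) not in runtime_nested_fields:
                subfields_not_in_runtime.append((field, subfield, isinstance(value, list)))

# Prune them, handling repeated (list-valued) fields item by item.
for field, subfield, is_repeated in subfields_not_in_runtime:
    target = request_init["table"][field]
    for item in (target if is_repeated else [target]):
        del item[subfield]

assert "field_not_in_runtime" not in request_init["table"]["restore_info"]
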
@@ -8657,24 +8733,6 @@ def test_update_table_rest_bad_request( request_init = { "table": {"name": "projects/sample1/instances/sample2/tables/sample3"} } - request_init["table"] = { - "name": "projects/sample1/instances/sample2/tables/sample3", - "cluster_states": {}, - "column_families": {}, - "granularity": 1, - "restore_info": { - "source_type": 1, - "backup_info": { - "backup": "backup_value", - "start_time": {"seconds": 751, "nanos": 543}, - "end_time": {}, - "source_table": "source_table_value", - "source_backup": "source_backup_value", - }, - }, - "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, - "deletion_protection": True, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. @@ -9299,8 +9357,9 @@ def test_modify_column_families_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9378,8 +9437,9 @@ def test_modify_column_families_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9516,8 +9576,9 @@ def test_modify_column_families_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Table.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9786,10 +9847,11 @@ def test_generate_consistency_token_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -9865,10 +9927,11 @@ def test_generate_consistency_token_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ 
-9996,10 +10059,11 @@ def test_generate_consistency_token_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10064,8 +10128,9 @@ def test_check_consistency_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10145,10 +10210,11 @@ def test_check_consistency_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb( + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10284,8 +10350,9 @@ def test_check_consistency_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10643,8 +10710,9 @@ def test_get_snapshot_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10722,8 +10790,9 @@ def test_get_snapshot_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10850,8 +10919,9 @@ def test_get_snapshot_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() 
response_value.status_code = 200 - pb_return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Snapshot.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10916,8 +10986,9 @@ def test_list_snapshots_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -10999,10 +11070,9 @@ def test_list_snapshots_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb( - return_value - ) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11137,8 +11207,9 @@ def test_list_snapshots_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11533,6 +11604,73 @@ def test_create_backup_rest(request_type): "kms_key_version": "kms_key_version_value", }, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. @@ -11731,30 +11869,6 @@ def test_create_backup_rest_bad_request( # send a request that will satisfy transcoding request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} - request_init["backup"] = { - "name": "name_value", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
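
The recurring `pb_return_value` → `return_value` rewrite in these hunks follows from the "Convert return value to protobuf type" comment: `json_format.MessageToJson` operates on raw protobuf messages, while the generated clients return proto-plus wrappers (which, as the helper above notes, have no `DESCRIPTOR`). A sketch of the conversion with a toy proto-plus message in place of the generated types:

import proto
from google.protobuf import json_format

# Toy message standing in for types like table.Backup in these tests.
class Backup(proto.Message):
    name = proto.Field(proto.STRING, number=1)

return_value = Backup(name="projects/p/instances/i/clusters/c/backups/b")
# Unwrap the proto-plus wrapper to its underlying *_pb2 message in place,
# exactly as the updated hunks do, then serialize it.
return_value = Backup.pb(return_value)
json_return_value = json_format.MessageToJson(return_value)
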
@@ -11869,8 +11983,9 @@ def test_get_backup_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -11949,8 +12064,9 @@ def test_get_backup_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12077,8 +12193,9 @@ def test_get_backup_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12159,6 +12276,73 @@ def test_update_backup_rest(request_type): "kms_key_version": "kms_key_version_value", }, } + # The version of a generated dependency at test runtime may differ from the version used during generation. + # Delete any fields which are not present in the current runtime dependency + # See https://github.com/googleapis/gapic-generator-python/issues/1748 + + # Determine if the message type is proto-plus or protobuf + test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] + + def get_message_fields(field): + # Given a field which is a message (composite type), return a list with + # all the fields of the message. + # If the field is not a composite type, return an empty list. 
+ message_fields = [] + + if hasattr(field, "message") and field.message: + is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") + + if is_field_type_proto_plus_type: + message_fields = field.message.meta.fields.values() + # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types + else: # pragma: NO COVER + message_fields = field.message.DESCRIPTOR.fields + return message_fields + + runtime_nested_fields = [ + (field.name, nested_field.name) + for field in get_message_fields(test_field) + for nested_field in get_message_fields(field) + ] + + subfields_not_in_runtime = [] + + # For each item in the sample request, create a list of sub fields which are not present at runtime + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for field, value in request_init["backup"].items(): # pragma: NO COVER + result = None + is_repeated = False + # For repeated fields + if isinstance(value, list) and len(value): + is_repeated = True + result = value[0] + # For fields where the type is another message + if isinstance(value, dict): + result = value + + if result and hasattr(result, "keys"): + for subfield in result.keys(): + if (field, subfield) not in runtime_nested_fields: + subfields_not_in_runtime.append( + { + "field": field, + "subfield": subfield, + "is_repeated": is_repeated, + } + ) + + # Remove fields from the sample request which are not present in the runtime version of the dependency + # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime + for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER + field = subfield_to_delete.get("field") + field_repeated = subfield_to_delete.get("is_repeated") + subfield = subfield_to_delete.get("subfield") + if subfield: + if field_repeated: + for i in range(0, len(request_init["backup"][field])): + del request_init["backup"][field][i][subfield] + else: + del request_init["backup"][field][subfield] request = request_type(**request_init) # Mock the http request call within the method and fake a response. 
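
Around each of these conversions the tests repeat the same response-mocking scaffold: a real `requests.Response` whose private `_content` is set by hand and which a patched request call then returns. A standalone sketch of that scaffold; the patch target, URL, and payload here are illustrative choices, not the ones used by the generated tests:

from unittest import mock
from requests import Response, Session

# Build the fake HTTP response the mocked transport will hand back.
response_value = Response()
response_value.status_code = 200
response_value._content = b'{"name": "projects/sample1/instances/sample2"}'

with mock.patch.object(Session, "request") as req:
    req.return_value = response_value
    resp = Session().request("GET", "https://bigtableadmin.example.invalid/")
    assert resp.status_code == 200
    assert resp.json()["name"] == "projects/sample1/instances/sample2"
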
@@ -12175,8 +12359,9 @@ def test_update_backup_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12253,8 +12438,9 @@ def test_update_backup_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12352,30 +12538,6 @@ def test_update_backup_rest_bad_request( "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4" } } - request_init["backup"] = { - "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", - "source_table": "source_table_value", - "source_backup": "source_backup_value", - "expire_time": {"seconds": 751, "nanos": 543}, - "start_time": {}, - "end_time": {}, - "size_bytes": 1089, - "state": 1, - "encryption_info": { - "encryption_type": 1, - "encryption_status": { - "code": 411, - "message": "message_value", - "details": [ - { - "type_url": "type.googleapis.com/google.protobuf.Duration", - "value": b"\x08\x0c\x10\xdb\x07", - } - ], - }, - "kms_key_version": "kms_key_version_value", - }, - } request = request_type(**request_init) # Mock the http request call within the method and fake a BadRequest error. 
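
One distinction worth noting before the bigtable_v2 data-plane hunks further down: unary methods serialize a single JSON object, while the server-streaming methods there (read_rows, sample_row_keys, mutate_rows, read_change_stream) additionally wrap the serialized message in a JSON array via `"[{}]".format(...)`, reflecting that the streamed REST body is transported as a JSON list of messages. A minimal sketch of that wrapping, with an illustrative payload:

import json

# One serialized response message (illustrative payload).
json_return_value = '{"lastScannedRowKey": ""}'
# Wrap it as the streaming tests below do, emulating a one-message stream.
json_return_value = "[{}]".format(json_return_value)
assert json.loads(json_return_value) == [{"lastScannedRowKey": ""}]
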
@@ -12418,8 +12580,9 @@ def test_update_backup_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = table.Backup.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12744,8 +12907,9 @@ def test_list_backups_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12829,8 +12993,9 @@ def test_list_backups_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -12967,8 +13132,9 @@ def test_list_backups_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13604,8 +13770,7 @@ def test_get_iam_policy_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13682,8 +13847,7 @@ def test_get_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13806,8 +13970,7 @@ def test_get_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13873,8 +14036,7 @@ def test_set_iam_policy_rest(request_type): # 
Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -13951,8 +14113,7 @@ def test_set_iam_policy_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -14083,8 +14244,7 @@ def test_set_iam_policy_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -14149,8 +14309,7 @@ def test_test_iam_permissions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -14230,8 +14389,7 @@ def test_test_iam_permissions_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -14365,8 +14523,7 @@ def test_test_iam_permissions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = return_value - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py index 597540d69..2319306d7 100644 --- a/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -3004,8 +3004,9 @@ def test_read_rows_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -3086,8 +3087,9 @@ def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = 
bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -3215,8 +3217,9 @@ def test_read_rows_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3286,8 +3289,9 @@ def test_sample_row_keys_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -3372,8 +3376,9 @@ def test_sample_row_keys_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -3501,8 +3506,9 @@ def test_sample_row_keys_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.SampleRowKeysResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3569,8 +3575,9 @@ def test_mutate_row_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3647,8 +3654,9 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = 
json_return_value.encode("UTF-8") req.return_value = response_value @@ -3787,8 +3795,9 @@ def test_mutate_row_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -3858,8 +3867,9 @@ def test_mutate_rows_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -3939,8 +3949,9 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -4077,8 +4088,9 @@ def test_mutate_rows_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.MutateRowsResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4148,8 +4160,9 @@ def test_check_and_mutate_row_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4229,8 +4242,9 @@ def test_check_and_mutate_row_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4386,8 +4400,9 @@ def test_check_and_mutate_row_rest_flattened(): # Wrap the value into a proper Response obj 
response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4473,8 +4488,9 @@ def test_ping_and_warm_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4547,8 +4563,9 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4670,8 +4687,9 @@ def test_ping_and_warm_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.PingAndWarmResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4733,8 +4751,9 @@ def test_read_modify_write_row_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4813,8 +4832,9 @@ def test_read_modify_write_row_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -4951,8 +4971,9 @@ def test_read_modify_write_row_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = 
json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5018,10 +5039,11 @@ def test_generate_initial_change_stream_partitions_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -5107,10 +5129,11 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -5249,10 +5272,11 @@ def test_generate_initial_change_stream_partitions_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( + # Convert return value to protobuf type + return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb( return_value ) - json_return_value = json_format.MessageToJson(pb_return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value @@ -5321,8 +5345,9 @@ def test_read_change_stream_rest(request_type): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) @@ -5404,8 +5429,9 @@ def test_read_change_stream_rest_required_fields( response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") @@ -5535,8 +5561,9 @@ def test_read_change_stream_rest_flattened(): # Wrap the value into a proper Response obj response_value = Response() response_value.status_code = 200 - pb_return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = 
json_format.MessageToJson(pb_return_value) + # Convert return value to protobuf type + return_value = bigtable.ReadChangeStreamResponse.pb(return_value) + json_return_value = json_format.MessageToJson(return_value) json_return_value = "[{}]".format(json_return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value From 343d1dd75a416d1721721f78b2aa29fae6963562 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Mon, 30 Oct 2023 07:26:59 -0400 Subject: [PATCH 44/52] chore: rename rst files to avoid conflict with service names (#877) Source-Link: https://github.com/googleapis/synthtool/commit/d52e638b37b091054c869bfa6f5a9fedaba9e0dd Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:4f9b3b106ad0beafc2c8a415e3f62c1a0cc23cabea115dbe841b848f581cfe99 Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .kokoro/requirements.txt | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index dd98abbde..7f291dbd5 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:08e34975760f002746b1d8c86fdc90660be45945ee6d9db914d1508acdf9a547 -# created: 2023-10-09T14:06:13.397766266Z + digest: sha256:4f9b3b106ad0beafc2c8a415e3f62c1a0cc23cabea115dbe841b848f581cfe99 +# created: 2023-10-18T20:26:37.410353675Z diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 0332d3267..16170d0ca 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -467,9 +467,9 @@ typing-extensions==4.4.0 \ --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e # via -r requirements.in -urllib3==1.26.17 \ - --hash=sha256:24d6a242c28d29af46c3fae832c36db3bbebcc533dd1bb549172cd739c82df21 \ - --hash=sha256:94a757d178c9be92ef5539b8840d48dc9cf1b2709c9d6b588232a055c524458b +urllib3==1.26.18 \ + --hash=sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07 \ + --hash=sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0 # via # requests # twine From cc2e82bd9abcecb60f4a31101ca0f4ce6589ad47 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Thu, 2 Nov 2023 21:20:47 -0400 Subject: [PATCH 45/52] chore: update docfx minimum Python version (#884) Source-Link: https://github.com/googleapis/synthtool/commit/bc07fd415c39853b382bcf8315f8eeacdf334055 Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:30470597773378105e239b59fce8eb27cc97375580d592699206d17d117143d0 Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 ++-- .github/workflows/docs.yml | 2 +- noxfile.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 7f291dbd5..ec696b558 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:4f9b3b106ad0beafc2c8a415e3f62c1a0cc23cabea115dbe841b848f581cfe99 -# created: 2023-10-18T20:26:37.410353675Z + digest: sha256:30470597773378105e239b59fce8eb27cc97375580d592699206d17d117143d0 +# created: 2023-11-03T00:57:07.335914631Z diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index e97d89e48..221806ced 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -28,7 +28,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v4 with: - python-version: "3.9" + python-version: "3.10" - name: Install nox run: | python -m pip install --upgrade setuptools pip wheel diff --git a/noxfile.py b/noxfile.py index 456191016..fafee6ac4 100644 --- a/noxfile.py +++ b/noxfile.py @@ -342,7 +342,7 @@ def docs(session): ) -@nox.session(python="3.9") +@nox.session(python="3.10") def docfx(session): """Build the docfx yaml files for this library.""" From c8244bb44929f0db7533ebff0599ec63c9e596a0 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 15 Nov 2023 02:32:20 -0500 Subject: [PATCH 46/52] chore: bump urllib3 from 1.26.12 to 1.26.18 (#885) Source-Link: https://github.com/googleapis/synthtool/commit/febacccc98d6d224aff9d0bd0373bb5a4cd5969c Post-Processor: gcr.io/cloud-devrel-public-resources/owlbot-python:latest@sha256:caffe0a9277daeccc4d1de5c9b55ebba0901b57c2f713ec9c876b0d4ec064f61 Co-authored-by: Owl Bot --- .github/.OwlBot.lock.yaml | 4 +- .kokoro/requirements.txt | 532 ++++++++++++++++++++------------------ 2 files changed, 277 insertions(+), 259 deletions(-) diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index ec696b558..453b540c1 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. 
docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:30470597773378105e239b59fce8eb27cc97375580d592699206d17d117143d0 -# created: 2023-11-03T00:57:07.335914631Z + digest: sha256:caffe0a9277daeccc4d1de5c9b55ebba0901b57c2f713ec9c876b0d4ec064f61 +# created: 2023-11-08T19:46:45.022803742Z diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index 16170d0ca..8957e2110 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -4,91 +4,75 @@ # # pip-compile --allow-unsafe --generate-hashes requirements.in # -argcomplete==2.0.0 \ - --hash=sha256:6372ad78c89d662035101418ae253668445b391755cfe94ea52f1b9d22425b20 \ - --hash=sha256:cffa11ea77999bb0dd27bb25ff6dc142a6796142f68d45b1a26b11f58724561e +argcomplete==3.1.4 \ + --hash=sha256:72558ba729e4c468572609817226fb0a6e7e9a0a7d477b882be168c0b4a62b94 \ + --hash=sha256:fbe56f8cda08aa9a04b307d8482ea703e96a6a801611acb4be9bf3942017989f # via nox -attrs==22.1.0 \ - --hash=sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6 \ - --hash=sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c +attrs==23.1.0 \ + --hash=sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04 \ + --hash=sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015 # via gcp-releasetool -bleach==5.0.1 \ - --hash=sha256:085f7f33c15bd408dd9b17a4ad77c577db66d76203e5984b1bd59baeee948b2a \ - --hash=sha256:0d03255c47eb9bd2f26aa9bb7f2107732e7e8fe195ca2f64709fcf3b0a4a085c - # via readme-renderer -cachetools==5.2.0 \ - --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ - --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db +cachetools==5.3.2 \ + --hash=sha256:086ee420196f7b2ab9ca2db2520aca326318b68fe5ba8bc4d49cca91add450f2 \ + --hash=sha256:861f35a13a451f94e301ce2bec7cac63e881232ccce7ed67fab9b5df4d3beaa1 # via google-auth certifi==2023.7.22 \ --hash=sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082 \ --hash=sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9 # via requests -cffi==1.15.1 \ - --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ - --hash=sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef \ - --hash=sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104 \ - --hash=sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426 \ - --hash=sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405 \ - --hash=sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375 \ - --hash=sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a \ - --hash=sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e \ - --hash=sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc \ - --hash=sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf \ - --hash=sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185 \ - --hash=sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497 \ - --hash=sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3 \ - --hash=sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35 \ - --hash=sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c \ - --hash=sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83 \ - --hash=sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21 \ - 
--hash=sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca \ - --hash=sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984 \ - --hash=sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac \ - --hash=sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd \ - --hash=sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee \ - --hash=sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a \ - --hash=sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2 \ - --hash=sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192 \ - --hash=sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7 \ - --hash=sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585 \ - --hash=sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f \ - --hash=sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e \ - --hash=sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27 \ - --hash=sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b \ - --hash=sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e \ - --hash=sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e \ - --hash=sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d \ - --hash=sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c \ - --hash=sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415 \ - --hash=sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82 \ - --hash=sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02 \ - --hash=sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314 \ - --hash=sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325 \ - --hash=sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c \ - --hash=sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3 \ - --hash=sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914 \ - --hash=sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045 \ - --hash=sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d \ - --hash=sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9 \ - --hash=sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5 \ - --hash=sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2 \ - --hash=sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c \ - --hash=sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3 \ - --hash=sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2 \ - --hash=sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8 \ - --hash=sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d \ - --hash=sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d \ - --hash=sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9 \ - --hash=sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162 \ - --hash=sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76 \ - --hash=sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4 \ - --hash=sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e \ - 
--hash=sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9 \ - --hash=sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6 \ - --hash=sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b \ - --hash=sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01 \ - --hash=sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0 +cffi==1.16.0 \ + --hash=sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc \ + --hash=sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a \ + --hash=sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417 \ + --hash=sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab \ + --hash=sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520 \ + --hash=sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36 \ + --hash=sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743 \ + --hash=sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8 \ + --hash=sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed \ + --hash=sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684 \ + --hash=sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56 \ + --hash=sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324 \ + --hash=sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d \ + --hash=sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235 \ + --hash=sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e \ + --hash=sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088 \ + --hash=sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000 \ + --hash=sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7 \ + --hash=sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e \ + --hash=sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673 \ + --hash=sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c \ + --hash=sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe \ + --hash=sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2 \ + --hash=sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098 \ + --hash=sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8 \ + --hash=sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a \ + --hash=sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0 \ + --hash=sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b \ + --hash=sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896 \ + --hash=sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e \ + --hash=sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9 \ + --hash=sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2 \ + --hash=sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b \ + --hash=sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6 \ + --hash=sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404 \ + --hash=sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f \ + --hash=sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0 \ + 
--hash=sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4 \ + --hash=sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc \ + --hash=sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936 \ + --hash=sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba \ + --hash=sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872 \ + --hash=sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb \ + --hash=sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614 \ + --hash=sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1 \ + --hash=sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d \ + --hash=sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969 \ + --hash=sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b \ + --hash=sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4 \ + --hash=sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627 \ + --hash=sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956 \ + --hash=sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357 # via cryptography charset-normalizer==2.1.1 \ --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ @@ -109,78 +93,74 @@ colorlog==6.7.0 \ # via # gcp-docuploader # nox -commonmark==0.9.1 \ - --hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ - --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 - # via rich -cryptography==41.0.4 \ - --hash=sha256:004b6ccc95943f6a9ad3142cfabcc769d7ee38a3f60fb0dddbfb431f818c3a67 \ - --hash=sha256:047c4603aeb4bbd8db2756e38f5b8bd7e94318c047cfe4efeb5d715e08b49311 \ - --hash=sha256:0d9409894f495d465fe6fda92cb70e8323e9648af912d5b9141d616df40a87b8 \ - --hash=sha256:23a25c09dfd0d9f28da2352503b23e086f8e78096b9fd585d1d14eca01613e13 \ - --hash=sha256:2ed09183922d66c4ec5fdaa59b4d14e105c084dd0febd27452de8f6f74704143 \ - --hash=sha256:35c00f637cd0b9d5b6c6bd11b6c3359194a8eba9c46d4e875a3660e3b400005f \ - --hash=sha256:37480760ae08065437e6573d14be973112c9e6dcaf5f11d00147ee74f37a3829 \ - --hash=sha256:3b224890962a2d7b57cf5eeb16ccaafba6083f7b811829f00476309bce2fe0fd \ - --hash=sha256:5a0f09cefded00e648a127048119f77bc2b2ec61e736660b5789e638f43cc397 \ - --hash=sha256:5b72205a360f3b6176485a333256b9bcd48700fc755fef51c8e7e67c4b63e3ac \ - --hash=sha256:7e53db173370dea832190870e975a1e09c86a879b613948f09eb49324218c14d \ - --hash=sha256:7febc3094125fc126a7f6fb1f420d0da639f3f32cb15c8ff0dc3997c4549f51a \ - --hash=sha256:80907d3faa55dc5434a16579952ac6da800935cd98d14dbd62f6f042c7f5e839 \ - --hash=sha256:86defa8d248c3fa029da68ce61fe735432b047e32179883bdb1e79ed9bb8195e \ - --hash=sha256:8ac4f9ead4bbd0bc8ab2d318f97d85147167a488be0e08814a37eb2f439d5cf6 \ - --hash=sha256:93530900d14c37a46ce3d6c9e6fd35dbe5f5601bf6b3a5c325c7bffc030344d9 \ - --hash=sha256:9eeb77214afae972a00dee47382d2591abe77bdae166bda672fb1e24702a3860 \ - --hash=sha256:b5f4dfe950ff0479f1f00eda09c18798d4f49b98f4e2006d644b3301682ebdca \ - --hash=sha256:c3391bd8e6de35f6f1140e50aaeb3e2b3d6a9012536ca23ab0d9c35ec18c8a91 \ - --hash=sha256:c880eba5175f4307129784eca96f4e70b88e57aa3f680aeba3bab0e980b0f37d \ - --hash=sha256:cecfefa17042941f94ab54f769c8ce0fe14beff2694e9ac684176a2535bf9714 \ - --hash=sha256:e40211b4923ba5a6dc9769eab704bdb3fbb58d56c5b336d30996c24fcf12aadb \ - 
--hash=sha256:efc8ad4e6fc4f1752ebfb58aefece8b4e3c4cae940b0994d43649bdfce8d0d4f +cryptography==41.0.5 \ + --hash=sha256:0c327cac00f082013c7c9fb6c46b7cc9fa3c288ca702c74773968173bda421bf \ + --hash=sha256:0d2a6a598847c46e3e321a7aef8af1436f11c27f1254933746304ff014664d84 \ + --hash=sha256:227ec057cd32a41c6651701abc0328135e472ed450f47c2766f23267b792a88e \ + --hash=sha256:22892cc830d8b2c89ea60148227631bb96a7da0c1b722f2aac8824b1b7c0b6b8 \ + --hash=sha256:392cb88b597247177172e02da6b7a63deeff1937fa6fec3bbf902ebd75d97ec7 \ + --hash=sha256:3be3ca726e1572517d2bef99a818378bbcf7d7799d5372a46c79c29eb8d166c1 \ + --hash=sha256:573eb7128cbca75f9157dcde974781209463ce56b5804983e11a1c462f0f4e88 \ + --hash=sha256:580afc7b7216deeb87a098ef0674d6ee34ab55993140838b14c9b83312b37b86 \ + --hash=sha256:5a70187954ba7292c7876734183e810b728b4f3965fbe571421cb2434d279179 \ + --hash=sha256:73801ac9736741f220e20435f84ecec75ed70eda90f781a148f1bad546963d81 \ + --hash=sha256:7d208c21e47940369accfc9e85f0de7693d9a5d843c2509b3846b2db170dfd20 \ + --hash=sha256:8254962e6ba1f4d2090c44daf50a547cd5f0bf446dc658a8e5f8156cae0d8548 \ + --hash=sha256:88417bff20162f635f24f849ab182b092697922088b477a7abd6664ddd82291d \ + --hash=sha256:a48e74dad1fb349f3dc1d449ed88e0017d792997a7ad2ec9587ed17405667e6d \ + --hash=sha256:b948e09fe5fb18517d99994184854ebd50b57248736fd4c720ad540560174ec5 \ + --hash=sha256:c707f7afd813478e2019ae32a7c49cd932dd60ab2d2a93e796f68236b7e1fbf1 \ + --hash=sha256:d38e6031e113b7421db1de0c1b1f7739564a88f1684c6b89234fbf6c11b75147 \ + --hash=sha256:d3977f0e276f6f5bf245c403156673db103283266601405376f075c849a0b936 \ + --hash=sha256:da6a0ff8f1016ccc7477e6339e1d50ce5f59b88905585f77193ebd5068f1e797 \ + --hash=sha256:e270c04f4d9b5671ebcc792b3ba5d4488bf7c42c3c241a3748e2599776f29696 \ + --hash=sha256:e886098619d3815e0ad5790c973afeee2c0e6e04b4da90b88e6bd06e2a0b1b72 \ + --hash=sha256:ec3b055ff8f1dce8e6ef28f626e0972981475173d7973d63f271b29c8a2897da \ + --hash=sha256:fba1e91467c65fe64a82c689dc6cf58151158993b13eb7a7f3f4b7f395636723 # via # gcp-releasetool # secretstorage -distlib==0.3.6 \ - --hash=sha256:14bad2d9b04d3a36127ac97f30b12a19268f211063d8f8ee4f47108896e11b46 \ - --hash=sha256:f35c4b692542ca110de7ef0bea44d73981caeb34ca0b9b6b2e6d7790dda8f80e +distlib==0.3.7 \ + --hash=sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057 \ + --hash=sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8 # via virtualenv -docutils==0.19 \ - --hash=sha256:33995a6753c30b7f577febfc2c50411fec6aac7f7ffeb7c4cfe5991072dcf9e6 \ - --hash=sha256:5e1de4d849fee02c63b040a4a3fd567f4ab104defd8a5511fbbc24a8a017efbc +docutils==0.20.1 \ + --hash=sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6 \ + --hash=sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b # via readme-renderer -filelock==3.8.0 \ - --hash=sha256:55447caa666f2198c5b6b13a26d2084d26fa5b115c00d065664b2124680c4edc \ - --hash=sha256:617eb4e5eedc82fc5f47b6d61e4d11cb837c56cb4544e39081099fa17ad109d4 +filelock==3.13.1 \ + --hash=sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e \ + --hash=sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c # via virtualenv -gcp-docuploader==0.6.4 \ - --hash=sha256:01486419e24633af78fd0167db74a2763974765ee8078ca6eb6964d0ebd388af \ - --hash=sha256:70861190c123d907b3b067da896265ead2eeb9263969d6955c9e0bb091b5ccbf +gcp-docuploader==0.6.5 \ + --hash=sha256:30221d4ac3e5a2b9c69aa52fdbef68cc3f27d0e6d0d90e220fc024584b8d2318 \ + 
--hash=sha256:b7458ef93f605b9d46a4bf3a8dc1755dad1f31d030c8679edf304e343b347eea # via -r requirements.in -gcp-releasetool==1.10.5 \ - --hash=sha256:174b7b102d704b254f2a26a3eda2c684fd3543320ec239baf771542a2e58e109 \ - --hash=sha256:e29d29927fe2ca493105a82958c6873bb2b90d503acac56be2c229e74de0eec9 +gcp-releasetool==1.16.0 \ + --hash=sha256:27bf19d2e87aaa884096ff941aa3c592c482be3d6a2bfe6f06afafa6af2353e3 \ + --hash=sha256:a316b197a543fd036209d0caba7a8eb4d236d8e65381c80cbc6d7efaa7606d63 # via -r requirements.in -google-api-core==2.10.2 \ - --hash=sha256:10c06f7739fe57781f87523375e8e1a3a4674bf6392cd6131a3222182b971320 \ - --hash=sha256:34f24bd1d5f72a8c4519773d99ca6bf080a6c4e041b4e9f024fe230191dda62e +google-api-core==2.12.0 \ + --hash=sha256:c22e01b1e3c4dcd90998494879612c38d0a3411d1f7b679eb89e2abe3ce1f553 \ + --hash=sha256:ec6054f7d64ad13b41e43d96f735acbd763b0f3b695dabaa2d579673f6a6e160 # via # google-cloud-core # google-cloud-storage -google-auth==2.14.1 \ - --hash=sha256:ccaa901f31ad5cbb562615eb8b664b3dd0bf5404a67618e642307f00613eda4d \ - --hash=sha256:f5d8701633bebc12e0deea4df8abd8aff31c28b355360597f7f2ee60f2e4d016 +google-auth==2.23.4 \ + --hash=sha256:79905d6b1652187def79d491d6e23d0cbb3a21d3c7ba0dbaa9c8a01906b13ff3 \ + --hash=sha256:d4bbc92fe4b8bfd2f3e8d88e5ba7085935da208ee38a134fc280e7ce682a05f2 # via # gcp-releasetool # google-api-core # google-cloud-core # google-cloud-storage -google-cloud-core==2.3.2 \ - --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \ - --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a +google-cloud-core==2.3.3 \ + --hash=sha256:37b80273c8d7eee1ae816b3a20ae43585ea50506cb0e60f3cf5be5f87f1373cb \ + --hash=sha256:fbd11cad3e98a7e5b0343dc07cb1039a5ffd7a5bb96e1f1e27cee4bda4a90863 # via google-cloud-storage -google-cloud-storage==2.6.0 \ - --hash=sha256:104ca28ae61243b637f2f01455cc8a05e8f15a2a18ced96cb587241cdd3820f5 \ - --hash=sha256:4ad0415ff61abdd8bb2ae81c1f8f7ec7d91a1011613f2db87c614c550f97bfe9 +google-cloud-storage==2.13.0 \ + --hash=sha256:ab0bf2e1780a1b74cf17fccb13788070b729f50c252f0c94ada2aae0ca95437d \ + --hash=sha256:f62dc4c7b6cd4360d072e3deb28035fbdad491ac3d9b0b1815a12daea10f37c7 # via gcp-docuploader google-crc32c==1.5.0 \ --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ @@ -251,29 +231,31 @@ google-crc32c==1.5.0 \ --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \ --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 - # via google-resumable-media -google-resumable-media==2.4.0 \ - --hash=sha256:2aa004c16d295c8f6c33b2b4788ba59d366677c0a25ae7382436cb30f776deaa \ - --hash=sha256:8d5518502f92b9ecc84ac46779bd4f09694ecb3ba38a3e7ca737a86d15cbca1f + # via + # google-cloud-storage + # google-resumable-media +google-resumable-media==2.6.0 \ + --hash=sha256:972852f6c65f933e15a4a210c2b96930763b47197cdf4aa5f5bea435efb626e7 \ + --hash=sha256:fc03d344381970f79eebb632a3c18bb1828593a2dc5572b5f90115ef7d11e81b # via google-cloud-storage -googleapis-common-protos==1.57.0 \ - --hash=sha256:27a849d6205838fb6cc3c1c21cb9800707a661bb21c6ce7fb13e99eb1f8a0c46 \ - --hash=sha256:a9f4a1d7f6d9809657b7f1316a1aa527f6664891531bcfcc13b6696e685f443c +googleapis-common-protos==1.61.0 \ + --hash=sha256:22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0 \ + --hash=sha256:8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b # via google-api-core 
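Every bump in this file carries `--hash=sha256:...` pins emitted by pip-compile; when a requirements file contains hashes, pip runs in hash-checking mode and rejects any downloaded artifact whose digest is not in the pinned set. Below is a minimal sketch of that integrity check, using the two digests pinned for googleapis-common-protos==1.61.0 above; the wheel filename is an assumption for illustration, and this snippet is editorial, not part of the patch.

```python
import hashlib
from pathlib import Path


def matches_pins(artifact: Path, pinned_sha256: set[str]) -> bool:
    """Return True if the artifact's sha256 matches one of the pinned digests."""
    return hashlib.sha256(artifact.read_bytes()).hexdigest() in pinned_sha256


# The two digests pinned for googleapis-common-protos==1.61.0 above.
pins = {
    "22f1915393bb3245343f6efe87f6fe868532efc12aa26b391b15132e1279f1c0",
    "8a64866a97f6304a7179873a465d6eee97b7a24ec6cfd78e0f575e96b821240b",
}

# Hypothetical local wheel; pip performs the equivalent check before installing.
print(matches_pins(Path("googleapis_common_protos-1.61.0-py2.py3-none-any.whl"), pins))
```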
idna==3.4 \ --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 # via requests -importlib-metadata==5.0.0 \ - --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \ - --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43 +importlib-metadata==6.8.0 \ + --hash=sha256:3ebb78df84a805d7698245025b975d9d67053cd94c79245ba4b3eb694abe68bb \ + --hash=sha256:dbace7892d8c0c4ac1ad096662232f831d4e64f4c4545bd53016a3e9d4654743 # via # -r requirements.in # keyring # twine -jaraco-classes==3.2.3 \ - --hash=sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158 \ - --hash=sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a +jaraco-classes==3.3.0 \ + --hash=sha256:10afa92b6743f25c0cf5f37c6bb6e18e2c5bb84a16527ccfc0040ea377e7aaeb \ + --hash=sha256:c063dd08e89217cee02c8d5e5ec560f2c8ce6cdc2fcdc2e68f7b2e5547ed3621 # via keyring jeepney==0.8.0 \ --hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ @@ -285,75 +267,121 @@ jinja2==3.1.2 \ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 # via gcp-releasetool -keyring==23.11.0 \ - --hash=sha256:3dd30011d555f1345dec2c262f0153f2f0ca6bca041fb1dc4588349bb4c0ac1e \ - --hash=sha256:ad192263e2cdd5f12875dedc2da13534359a7e760e77f8d04b50968a821c2361 +keyring==24.2.0 \ + --hash=sha256:4901caaf597bfd3bbd78c9a0c7c4c29fcd8310dab2cffefe749e916b6527acd6 \ + --hash=sha256:ca0746a19ec421219f4d713f848fa297a661a8a8c1504867e55bfb5e09091509 # via # gcp-releasetool # twine -markupsafe==2.1.1 \ - --hash=sha256:0212a68688482dc52b2d45013df70d169f542b7394fc744c02a57374a4207003 \ - --hash=sha256:089cf3dbf0cd6c100f02945abeb18484bd1ee57a079aefd52cffd17fba910b88 \ - --hash=sha256:10c1bfff05d95783da83491be968e8fe789263689c02724e0c691933c52994f5 \ - --hash=sha256:33b74d289bd2f5e527beadcaa3f401e0df0a89927c1559c8566c066fa4248ab7 \ - --hash=sha256:3799351e2336dc91ea70b034983ee71cf2f9533cdff7c14c90ea126bfd95d65a \ - --hash=sha256:3ce11ee3f23f79dbd06fb3d63e2f6af7b12db1d46932fe7bd8afa259a5996603 \ - --hash=sha256:421be9fbf0ffe9ffd7a378aafebbf6f4602d564d34be190fc19a193232fd12b1 \ - --hash=sha256:43093fb83d8343aac0b1baa75516da6092f58f41200907ef92448ecab8825135 \ - --hash=sha256:46d00d6cfecdde84d40e572d63735ef81423ad31184100411e6e3388d405e247 \ - --hash=sha256:4a33dea2b688b3190ee12bd7cfa29d39c9ed176bda40bfa11099a3ce5d3a7ac6 \ - --hash=sha256:4b9fe39a2ccc108a4accc2676e77da025ce383c108593d65cc909add5c3bd601 \ - --hash=sha256:56442863ed2b06d19c37f94d999035e15ee982988920e12a5b4ba29b62ad1f77 \ - --hash=sha256:671cd1187ed5e62818414afe79ed29da836dde67166a9fac6d435873c44fdd02 \ - --hash=sha256:694deca8d702d5db21ec83983ce0bb4b26a578e71fbdbd4fdcd387daa90e4d5e \ - --hash=sha256:6a074d34ee7a5ce3effbc526b7083ec9731bb3cbf921bbe1d3005d4d2bdb3a63 \ - --hash=sha256:6d0072fea50feec76a4c418096652f2c3238eaa014b2f94aeb1d56a66b41403f \ - --hash=sha256:6fbf47b5d3728c6aea2abb0589b5d30459e369baa772e0f37a0320185e87c980 \ - --hash=sha256:7f91197cc9e48f989d12e4e6fbc46495c446636dfc81b9ccf50bb0ec74b91d4b \ - --hash=sha256:86b1f75c4e7c2ac2ccdaec2b9022845dbb81880ca318bb7a0a01fbf7813e3812 \ - --hash=sha256:8dc1c72a69aa7e082593c4a203dcf94ddb74bb5c8a731e4e1eb68d031e8498ff \ - --hash=sha256:8e3dcf21f367459434c18e71b2a9532d96547aef8a871872a5bd69a715c15f96 \ - 
--hash=sha256:8e576a51ad59e4bfaac456023a78f6b5e6e7651dcd383bcc3e18d06f9b55d6d1 \ - --hash=sha256:96e37a3dc86e80bf81758c152fe66dbf60ed5eca3d26305edf01892257049925 \ - --hash=sha256:97a68e6ada378df82bc9f16b800ab77cbf4b2fada0081794318520138c088e4a \ - --hash=sha256:99a2a507ed3ac881b975a2976d59f38c19386d128e7a9a18b7df6fff1fd4c1d6 \ - --hash=sha256:a49907dd8420c5685cfa064a1335b6754b74541bbb3706c259c02ed65b644b3e \ - --hash=sha256:b09bf97215625a311f669476f44b8b318b075847b49316d3e28c08e41a7a573f \ - --hash=sha256:b7bd98b796e2b6553da7225aeb61f447f80a1ca64f41d83612e6139ca5213aa4 \ - --hash=sha256:b87db4360013327109564f0e591bd2a3b318547bcef31b468a92ee504d07ae4f \ - --hash=sha256:bcb3ed405ed3222f9904899563d6fc492ff75cce56cba05e32eff40e6acbeaa3 \ - --hash=sha256:d4306c36ca495956b6d568d276ac11fdd9c30a36f1b6eb928070dc5360b22e1c \ - --hash=sha256:d5ee4f386140395a2c818d149221149c54849dfcfcb9f1debfe07a8b8bd63f9a \ - --hash=sha256:dda30ba7e87fbbb7eab1ec9f58678558fd9a6b8b853530e176eabd064da81417 \ - --hash=sha256:e04e26803c9c3851c931eac40c695602c6295b8d432cbe78609649ad9bd2da8a \ - --hash=sha256:e1c0b87e09fa55a220f058d1d49d3fb8df88fbfab58558f1198e08c1e1de842a \ - --hash=sha256:e72591e9ecd94d7feb70c1cbd7be7b3ebea3f548870aa91e2732960fa4d57a37 \ - --hash=sha256:e8c843bbcda3a2f1e3c2ab25913c80a3c5376cd00c6e8c4a86a89a28c8dc5452 \ - --hash=sha256:efc1913fd2ca4f334418481c7e595c00aad186563bbc1ec76067848c7ca0a933 \ - --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \ - --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 +markdown-it-py==3.0.0 \ + --hash=sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1 \ + --hash=sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb + # via rich +markupsafe==2.1.3 \ + --hash=sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e \ + --hash=sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e \ + --hash=sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431 \ + --hash=sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686 \ + --hash=sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c \ + --hash=sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559 \ + --hash=sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc \ + --hash=sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb \ + --hash=sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939 \ + --hash=sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c \ + --hash=sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0 \ + --hash=sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4 \ + --hash=sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9 \ + --hash=sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575 \ + --hash=sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba \ + --hash=sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d \ + --hash=sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd \ + --hash=sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3 \ + --hash=sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00 \ + --hash=sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155 \ + --hash=sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac \ + 
--hash=sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52 \ + --hash=sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f \ + --hash=sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8 \ + --hash=sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b \ + --hash=sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007 \ + --hash=sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24 \ + --hash=sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea \ + --hash=sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198 \ + --hash=sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0 \ + --hash=sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee \ + --hash=sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be \ + --hash=sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2 \ + --hash=sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1 \ + --hash=sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707 \ + --hash=sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6 \ + --hash=sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c \ + --hash=sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58 \ + --hash=sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823 \ + --hash=sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779 \ + --hash=sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636 \ + --hash=sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c \ + --hash=sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad \ + --hash=sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee \ + --hash=sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc \ + --hash=sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2 \ + --hash=sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48 \ + --hash=sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7 \ + --hash=sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e \ + --hash=sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b \ + --hash=sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa \ + --hash=sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5 \ + --hash=sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e \ + --hash=sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb \ + --hash=sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9 \ + --hash=sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57 \ + --hash=sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc \ + --hash=sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc \ + --hash=sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2 \ + --hash=sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11 # via jinja2 -more-itertools==9.0.0 \ - --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \ - --hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab +mdurl==0.1.2 \ + --hash=sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8 \ + 
--hash=sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba + # via markdown-it-py +more-itertools==10.1.0 \ + --hash=sha256:626c369fa0eb37bac0291bce8259b332fd59ac792fa5497b59837309cd5b114a \ + --hash=sha256:64e0735fcfdc6f3464ea133afe8ea4483b1c5fe3a3d69852e6503b43a0b222e6 # via jaraco-classes -nox==2022.11.21 \ - --hash=sha256:0e41a990e290e274cb205a976c4c97ee3c5234441a8132c8c3fd9ea3c22149eb \ - --hash=sha256:e21c31de0711d1274ca585a2c5fde36b1aa962005ba8e9322bf5eeed16dcd684 +nh3==0.2.14 \ + --hash=sha256:116c9515937f94f0057ef50ebcbcc10600860065953ba56f14473ff706371873 \ + --hash=sha256:18415df36db9b001f71a42a3a5395db79cf23d556996090d293764436e98e8ad \ + --hash=sha256:203cac86e313cf6486704d0ec620a992c8bc164c86d3a4fd3d761dd552d839b5 \ + --hash=sha256:2b0be5c792bd43d0abef8ca39dd8acb3c0611052ce466d0401d51ea0d9aa7525 \ + --hash=sha256:377aaf6a9e7c63962f367158d808c6a1344e2b4f83d071c43fbd631b75c4f0b2 \ + --hash=sha256:525846c56c2bcd376f5eaee76063ebf33cf1e620c1498b2a40107f60cfc6054e \ + --hash=sha256:5529a3bf99402c34056576d80ae5547123f1078da76aa99e8ed79e44fa67282d \ + --hash=sha256:7771d43222b639a4cd9e341f870cee336b9d886de1ad9bec8dddab22fe1de450 \ + --hash=sha256:88c753efbcdfc2644a5012938c6b9753f1c64a5723a67f0301ca43e7b85dcf0e \ + --hash=sha256:93a943cfd3e33bd03f77b97baa11990148687877b74193bf777956b67054dcc6 \ + --hash=sha256:9be2f68fb9a40d8440cbf34cbf40758aa7f6093160bfc7fb018cce8e424f0c3a \ + --hash=sha256:a0c509894fd4dccdff557068e5074999ae3b75f4c5a2d6fb5415e782e25679c4 \ + --hash=sha256:ac8056e937f264995a82bf0053ca898a1cb1c9efc7cd68fa07fe0060734df7e4 \ + --hash=sha256:aed56a86daa43966dd790ba86d4b810b219f75b4bb737461b6886ce2bde38fd6 \ + --hash=sha256:e8986f1dd3221d1e741fda0a12eaa4a273f1d80a35e31a1ffe579e7c621d069e \ + --hash=sha256:f99212a81c62b5f22f9e7c3e347aa00491114a5647e1f13bbebd79c3e5f08d75 + # via readme-renderer +nox==2023.4.22 \ + --hash=sha256:0b1adc619c58ab4fa57d6ab2e7823fe47a32e70202f287d78474adcc7bda1891 \ + --hash=sha256:46c0560b0dc609d7d967dc99e22cb463d3c4caf54a5fda735d6c11b5177e3a9f # via -r requirements.in -packaging==21.3 \ - --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ - --hash=sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522 +packaging==23.2 \ + --hash=sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5 \ + --hash=sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7 # via # gcp-releasetool # nox -pkginfo==1.8.3 \ - --hash=sha256:848865108ec99d4901b2f7e84058b6e7660aae8ae10164e015a6dcf5b242a594 \ - --hash=sha256:a84da4318dd86f870a9447a8c98340aa06216bfc6f2b7bdc4b8766984ae1867c +pkginfo==1.9.6 \ + --hash=sha256:4b7a555a6d5a22169fcc9cf7bfd78d296b0361adad412a346c1226849af5e546 \ + --hash=sha256:8fd5896e8718a4372f0ea9cc9d96f6417c9b986e23a4d116dda26b62cc29d046 # via twine -platformdirs==2.5.4 \ - --hash=sha256:1006647646d80f16130f052404c6b901e80ee4ed6bef6792e1f238a8969106f7 \ - --hash=sha256:af0276409f9a02373d540bf8480021a048711d572745aef4b7842dad245eba10 +platformdirs==3.11.0 \ + --hash=sha256:cf8ee52a3afdb965072dcc652433e0c7e3e40cf5ea1477cd4b3b1d2eb75495b3 \ + --hash=sha256:e9d171d00af68be50e9202731309c4e658fd8bc76f55c11c7dd760d023bda68e # via virtualenv protobuf==3.20.3 \ --hash=sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7 \ @@ -383,34 +411,30 @@ protobuf==3.20.3 \ # gcp-releasetool # google-api-core # googleapis-common-protos -pyasn1==0.4.8 \ - --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ - 
--hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba +pyasn1==0.5.0 \ + --hash=sha256:87a2121042a1ac9358cabcaf1d07680ff97ee6404333bacca15f76aa8ad01a57 \ + --hash=sha256:97b7290ca68e62a832558ec3976f15cbf911bf5d7c7039d8b861c2a0ece69fde # via # pyasn1-modules # rsa -pyasn1-modules==0.2.8 \ - --hash=sha256:905f84c712230b2c592c19470d3ca8d552de726050d1d1716282a1f6146be65e \ - --hash=sha256:a50b808ffeb97cb3601dd25981f6b016cbb3d31fbf57a8b8a87428e6158d0c74 +pyasn1-modules==0.3.0 \ + --hash=sha256:5bd01446b736eb9d31512a30d46c1ac3395d676c6f3cafa4c03eb54b9925631c \ + --hash=sha256:d3ccd6ed470d9ffbc716be08bd90efbd44d0734bc9303818f7336070984a162d # via google-auth pycparser==2.21 \ --hash=sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9 \ --hash=sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206 # via cffi -pygments==2.15.0 \ - --hash=sha256:77a3299119af881904cd5ecd1ac6a66214b6e9bed1f2db16993b54adede64094 \ - --hash=sha256:f7e36cffc4c517fbc252861b9a6e4644ca0e5abadf9a113c72d1358ad09b9500 +pygments==2.16.1 \ + --hash=sha256:13fc09fa63bc8d8671a6d247e1eb303c4b343eaee81d861f3404db2935653692 \ + --hash=sha256:1daff0494820c69bc8941e407aa20f577374ee88364ee10a98fdbe0aece96e29 # via # readme-renderer # rich -pyjwt==2.6.0 \ - --hash=sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd \ - --hash=sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14 +pyjwt==2.8.0 \ + --hash=sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de \ + --hash=sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320 # via gcp-releasetool -pyparsing==3.0.9 \ - --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ - --hash=sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc - # via packaging pyperclip==1.8.2 \ --hash=sha256:105254a8b04934f0bc84e9c24eb360a591aaf6535c9def5f29d92af107a9bf57 # via gcp-releasetool @@ -418,9 +442,9 @@ python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 # via gcp-releasetool -readme-renderer==37.3 \ - --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \ - --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343 +readme-renderer==42.0 \ + --hash=sha256:13d039515c1f24de668e2c93f2e877b9dbe6c6c32328b90a40a49d8b2b85f36d \ + --hash=sha256:2d55489f83be4992fe4454939d1a051c33edbab778e82761d060c9fc6b308cd1 # via twine requests==2.31.0 \ --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ @@ -431,17 +455,17 @@ requests==2.31.0 \ # google-cloud-storage # requests-toolbelt # twine -requests-toolbelt==0.10.1 \ - --hash=sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7 \ - --hash=sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d +requests-toolbelt==1.0.0 \ + --hash=sha256:7681a0a3d047012b5bdc0ee37d7f8f07ebe76ab08caeccfc3921ce23c88d5bc6 \ + --hash=sha256:cccfdd665f0a24fcf4726e690f65639d272bb0637b9b92dfd91a5568ccf6bd06 # via twine rfc3986==2.0.0 \ --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c # via twine -rich==12.6.0 \ - --hash=sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e \ - --hash=sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0 
+rich==13.6.0 \ + --hash=sha256:2b38e2fe9ca72c9a00170a1a2d20c63c790d0e10ef1fe35eba76e1e7b1d7d245 \ + --hash=sha256:5c14d22737e6d5084ef4771b62d5d4363165b403455a30a1c8ca39dc7b644bef # via twine rsa==4.9 \ --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ @@ -455,43 +479,37 @@ six==1.16.0 \ --hash=sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926 \ --hash=sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254 # via - # bleach # gcp-docuploader - # google-auth # python-dateutil -twine==4.0.1 \ - --hash=sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e \ - --hash=sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0 +twine==4.0.2 \ + --hash=sha256:929bc3c280033347a00f847236564d1c52a3e61b1ac2516c97c48f3ceab756d8 \ + --hash=sha256:9e102ef5fdd5a20661eb88fad46338806c3bd32cf1db729603fe3697b1bc83c8 # via -r requirements.in -typing-extensions==4.4.0 \ - --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ - --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e +typing-extensions==4.8.0 \ + --hash=sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0 \ + --hash=sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef # via -r requirements.in -urllib3==1.26.18 \ - --hash=sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07 \ - --hash=sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0 +urllib3==2.0.7 \ + --hash=sha256:c97dfde1f7bd43a71c8d2a58e369e9b2bf692d1334ea9f9cae55add7d0dd0f84 \ + --hash=sha256:fdb6d215c776278489906c2f8916e6e7d4f5a9b602ccbcfdf7f016fc8da0596e # via # requests # twine -virtualenv==20.16.7 \ - --hash=sha256:8691e3ff9387f743e00f6bb20f70121f5e4f596cae754531f2b3b3a1b1ac696e \ - --hash=sha256:efd66b00386fdb7dbe4822d172303f40cd05e50e01740b19ea42425cbe653e29 +virtualenv==20.24.6 \ + --hash=sha256:02ece4f56fbf939dbbc33c0715159951d6bf14aaf5457b092e4548e1382455af \ + --hash=sha256:520d056652454c5098a00c0f073611ccbea4c79089331f60bf9d7ba247bb7381 # via nox -webencodings==0.5.1 \ - --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ - --hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 - # via bleach -wheel==0.38.4 \ - --hash=sha256:965f5259b566725405b05e7cf774052044b1ed30119b5d586b2703aafe8719ac \ - --hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8 +wheel==0.41.3 \ + --hash=sha256:488609bc63a29322326e05560731bf7bfea8e48ad646e1f5e40d366607de0942 \ + --hash=sha256:4d4987ce51a49370ea65c0bfd2234e8ce80a12780820d9dc462597a6e60d0841 # via -r requirements.in -zipp==3.10.0 \ - --hash=sha256:4fcb6f278987a6605757302a6e40e896257570d11c51628968ccb2a47e80c6c1 \ - --hash=sha256:7a7262fd930bd3e36c50b9a64897aec3fafff3dfdeec9623ae22b40e93f99bb8 +zipp==3.17.0 \ + --hash=sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31 \ + --hash=sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -setuptools==65.5.1 \ - --hash=sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31 \ - --hash=sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f +setuptools==68.2.2 \ + --hash=sha256:4ac1475276d2f1c48684874089fefcd83bd7162ddaafb81fac866ba0db282a87 \ + --hash=sha256:b454a35605876da60632df1a60f736524eb73cc47bbc9f3f1ef1b644de74fd2a # via -r 
requirements.in From 597efd11d15f20549010b4301be4d9768326e6a2 Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 29 Nov 2023 15:49:59 -0800 Subject: [PATCH 47/52] chore: Update gapic-generator-python to v1.12.0 (#891) --- .../bigtable_instance_admin/async_client.py | 74 +++++++++--------- .../bigtable_table_admin/async_client.py | 76 +++++++++---------- .../services/bigtable/async_client.py | 26 +++---- 3 files changed, 88 insertions(+), 88 deletions(-) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index 3f67620c0..e4c4639af 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -33,14 +33,14 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -305,7 +305,7 @@ async def create_instance( This corresponds to the ``clusters`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -404,7 +404,7 @@ async def get_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -440,7 +440,7 @@ async def get_instance( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_instance, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -496,7 +496,7 @@ async def list_instances( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -529,7 +529,7 @@ async def list_instances( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_instances, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -581,7 +581,7 @@ async def update_instance( served from all [Clusters][google.bigtable.admin.v2.Cluster] in the instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -603,7 +603,7 @@ async def update_instance( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_instance, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -669,7 +669,7 @@ async def partial_update_instance( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -709,7 +709,7 @@ async def partial_update_instance( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.partial_update_instance, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -775,7 +775,7 @@ async def delete_instance( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -869,7 +869,7 @@ async def create_cluster( This corresponds to the ``cluster`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -964,7 +964,7 @@ async def get_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -999,7 +999,7 @@ async def get_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_cluster, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1057,7 +1057,7 @@ async def list_clusters( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1090,7 +1090,7 @@ async def list_clusters( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_clusters, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1141,7 +1141,7 @@ async def update_cluster( location, capable of serving all [Tables][google.bigtable.admin.v2.Table] in the parent [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1164,7 +1164,7 @@ async def update_cluster( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_cluster, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1248,7 +1248,7 @@ async def partial_update_cluster( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1343,7 +1343,7 @@ async def delete_cluster( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1431,7 +1431,7 @@ async def create_app_profile( This corresponds to the ``app_profile`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1515,7 +1515,7 @@ async def get_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1549,7 +1549,7 @@ async def get_app_profile( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_app_profile, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1608,7 +1608,7 @@ async def list_app_profiles( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1644,7 +1644,7 @@ async def list_app_profiles( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_app_profiles, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1717,7 +1717,7 @@ async def update_app_profile( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1754,7 +1754,7 @@ async def update_app_profile( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.update_app_profile, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1820,7 +1820,7 @@ async def delete_app_profile( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1890,7 +1890,7 @@ async def get_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1953,7 +1953,7 @@ async def get_iam_policy( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_iam_policy, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -2008,7 +2008,7 @@ async def set_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2126,7 +2126,7 @@ async def test_iam_permissions( This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2160,7 +2160,7 @@ async def test_iam_permissions( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.test_iam_permissions, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -2217,7 +2217,7 @@ async def list_hot_tablets( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2253,7 +2253,7 @@ async def list_hot_tablets( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_hot_tablets, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index d5edeb91d..5a4435bde 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -33,14 +33,14 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -282,7 +282,7 @@ async def create_table( This corresponds to the ``table`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -402,7 +402,7 @@ async def create_table_from_snapshot( This corresponds to the ``source_snapshot`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -494,7 +494,7 @@ async def list_tables( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -530,7 +530,7 @@ async def list_tables( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_tables, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -593,7 +593,7 @@ async def get_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -628,7 +628,7 @@ async def get_table( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_table, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -701,7 +701,7 @@ async def update_table( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -794,7 +794,7 @@ async def delete_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -865,7 +865,7 @@ async def undelete_table( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -976,7 +976,7 @@ async def modify_column_families( This corresponds to the ``modifications`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1051,7 +1051,7 @@ async def drop_row_range( request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]): The request object. 
Request message for [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1111,7 +1111,7 @@ async def generate_consistency_token( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1144,7 +1144,7 @@ async def generate_consistency_token( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.generate_consistency_token, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1211,7 +1211,7 @@ async def check_consistency( This corresponds to the ``consistency_token`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1246,7 +1246,7 @@ async def check_consistency( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.check_consistency, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1344,7 +1344,7 @@ async def snapshot_table( This corresponds to the ``description`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1459,7 +1459,7 @@ async def get_snapshot( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1503,7 +1503,7 @@ async def get_snapshot( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_snapshot, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1576,7 +1576,7 @@ async def list_snapshots( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1619,7 +1619,7 @@ async def list_snapshots( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_snapshots, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1698,7 +1698,7 @@ async def delete_snapshot( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1794,7 +1794,7 @@ async def create_backup( This corresponds to the ``backup`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1886,7 +1886,7 @@ async def get_backup( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1917,7 +1917,7 @@ async def get_backup( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_backup, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -1987,7 +1987,7 @@ async def update_backup( This corresponds to the ``update_mask`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2066,7 +2066,7 @@ async def delete_backup( This corresponds to the ``name`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2138,7 +2138,7 @@ async def list_backups( This corresponds to the ``parent`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2174,7 +2174,7 @@ async def list_backups( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.list_backups, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -2235,7 +2235,7 @@ async def restore_table( request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]): The request object. The request for [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2350,7 +2350,7 @@ async def copy_backup( This corresponds to the ``expire_time`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2446,7 +2446,7 @@ async def get_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2509,7 +2509,7 @@ async def get_iam_policy( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.get_iam_policy, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, @@ -2564,7 +2564,7 @@ async def set_iam_policy( This corresponds to the ``resource`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2682,7 +2682,7 @@ async def test_iam_permissions( This corresponds to the ``permissions`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -2716,7 +2716,7 @@ async def test_iam_permissions( # and friendly error handling. 
rpc = gapic_v1.method_async.wrap_method( self._client._transport.test_iam_permissions, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=1.0, maximum=60.0, multiplier=2, diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 07a782d0c..33686a4a8 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -35,14 +35,14 @@ from google.api_core.client_options import ClientOptions from google.api_core import exceptions as core_exceptions from google.api_core import gapic_v1 -from google.api_core import retry as retries +from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data @@ -250,7 +250,7 @@ def read_rows( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -345,7 +345,7 @@ def sample_row_keys( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -457,7 +457,7 @@ async def mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -496,7 +496,7 @@ async def mutate_row( # and friendly error handling. rpc = gapic_v1.method_async.wrap_method( self._client._transport.mutate_row, - default_retry=retries.Retry( + default_retry=retries.AsyncRetry( initial=0.01, maximum=60.0, multiplier=2, @@ -579,7 +579,7 @@ def mutate_rows( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -718,7 +718,7 @@ async def check_and_mutate_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -828,7 +828,7 @@ async def ping_and_warm( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -946,7 +946,7 @@ async def read_modify_write_row( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1052,7 +1052,7 @@ def generate_initial_change_stream_partitions( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. metadata (Sequence[Tuple[str, str]]): Strings which should be @@ -1151,7 +1151,7 @@ def read_change_stream( This corresponds to the ``app_profile_id`` field on the ``request`` instance; if ``request`` is provided, this should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, + retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
metadata (Sequence[Tuple[str, str]]): Strings which should be From d218f4ebd4ed6705721dca9318df955b40b0d0ac Mon Sep 17 00:00:00 2001 From: Anthonios Partheniou Date: Fri, 1 Dec 2023 19:42:27 -0500 Subject: [PATCH 48/52] feat: Introduce compatibility with native namespace packages (#893) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: Introduce compatibility with native namespace packages * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/__init__.py | 6 ------ google/cloud/__init__.py | 6 ------ mypy.ini | 2 +- noxfile.py | 2 +- owlbot.py | 2 +- setup.py | 9 +-------- tests/unit/test_packaging.py | 37 ++++++++++++++++++++++++++++++++++++ 7 files changed, 41 insertions(+), 23 deletions(-) delete mode 100644 google/__init__.py delete mode 100644 google/cloud/__init__.py create mode 100644 tests/unit/test_packaging.py diff --git a/google/__init__.py b/google/__init__.py deleted file mode 100644 index a5ba80656..000000000 --- a/google/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - pass diff --git a/google/cloud/__init__.py b/google/cloud/__init__.py deleted file mode 100644 index a5ba80656..000000000 --- a/google/cloud/__init__.py +++ /dev/null @@ -1,6 +0,0 @@ -try: - import pkg_resources - - pkg_resources.declare_namespace(__name__) -except ImportError: - pass diff --git a/mypy.ini b/mypy.ini index f12ed46fc..31cc24223 100644 --- a/mypy.ini +++ b/mypy.ini @@ -1,5 +1,5 @@ [mypy] -python_version = 3.6 +python_version = 3.8 namespace_packages = True exclude = tests/unit/gapic/ diff --git a/noxfile.py b/noxfile.py index fafee6ac4..63a169cbb 100644 --- a/noxfile.py +++ b/noxfile.py @@ -135,7 +135,7 @@ def mypy(session): ) session.install("google-cloud-testutils") # TODO: also verify types on tests, all of google package - session.run("mypy", "google/", "tests/") + session.run("mypy", "-p", "google", "-p", "tests") @nox.session(python=DEFAULT_PYTHON_VERSION) diff --git a/owlbot.py b/owlbot.py index 78c6ca2f8..4b06aea77 100644 --- a/owlbot.py +++ b/owlbot.py @@ -169,7 +169,7 @@ def mypy(session): session.install("mypy", "types-setuptools", "types-protobuf", "types-mock", "types-requests") session.install("google-cloud-testutils") # TODO: also verify types on tests, all of google package - session.run("mypy", "google/", "tests/") + session.run("mypy", "-p", "google", "-p", "tests") @nox.session(python=DEFAULT_PYTHON_VERSION) diff --git a/setup.py b/setup.py index 495730888..617ec77dd 100644 --- a/setup.py +++ b/setup.py @@ -59,16 +59,10 @@ # benchmarks, etc. packages = [ package - for package in setuptools.PEP420PackageFinder.find() + for package in setuptools.find_namespace_packages() if package.startswith("google") ] -# Determine which namespaces are needed. 
-namespaces = ["google"] -if "google.cloud" in packages: - namespaces.append("google.cloud") - - setuptools.setup( name=name, version=version, @@ -93,7 +87,6 @@ ], platforms="Posix; MacOS X; Windows", packages=packages, - namespace_packages=namespaces, install_requires=dependencies, extras_require=extras, scripts=[ diff --git a/tests/unit/test_packaging.py b/tests/unit/test_packaging.py new file mode 100644 index 000000000..93fa4d1c3 --- /dev/null +++ b/tests/unit/test_packaging.py @@ -0,0 +1,37 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import os +import subprocess +import sys + + +def test_namespace_package_compat(tmp_path): + # The ``google`` namespace package should not be masked + # by the presence of ``google-cloud-bigtable``. + google = tmp_path / "google" + google.mkdir() + google.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.othermod"] + subprocess.check_call(cmd, env=env) + + # The ``google.cloud`` namespace package should not be masked + # by the presence of ``google-cloud-bigtable``. + google_cloud = tmp_path / "google" / "cloud" + google_cloud.mkdir() + google_cloud.joinpath("othermod.py").write_text("") + env = dict(os.environ, PYTHONPATH=str(tmp_path)) + cmd = [sys.executable, "-m", "google.cloud.othermod"] + subprocess.check_call(cmd, env=env) From 13ee2cfdef0c747603b1529b8821864ac78d6dba Mon Sep 17 00:00:00 2001 From: Mend Renovate Date: Tue, 12 Dec 2023 16:57:13 +0100 Subject: [PATCH 49/52] chore(deps): update all dependencies (#771) * chore(deps): update all dependencies * Pin apache-beam for python 3.7 * Pin apache-beam for python 3.7 * Testing earlier version of apache-beam for python 3.7 * revert * revert --------- Co-authored-by: Anthonios Partheniou --- .github/workflows/system_emulated.yml | 4 ++-- samples/beam/requirements-test.txt | 2 +- samples/beam/requirements.txt | 2 +- samples/hello/requirements-test.txt | 2 +- samples/hello/requirements.txt | 4 ++-- samples/hello_happybase/requirements-test.txt | 2 +- samples/instanceadmin/requirements-test.txt | 2 +- samples/instanceadmin/requirements.txt | 2 +- samples/metricscaler/requirements-test.txt | 4 ++-- samples/metricscaler/requirements.txt | 4 ++-- samples/quickstart/requirements-test.txt | 2 +- samples/quickstart/requirements.txt | 2 +- samples/quickstart_happybase/requirements-test.txt | 2 +- samples/snippets/deletes/requirements-test.txt | 2 +- samples/snippets/deletes/requirements.txt | 2 +- samples/snippets/filters/requirements-test.txt | 2 +- samples/snippets/filters/requirements.txt | 2 +- samples/snippets/reads/requirements-test.txt | 2 +- samples/snippets/reads/requirements.txt | 2 +- samples/snippets/writes/requirements-test.txt | 2 +- samples/snippets/writes/requirements.txt | 2 +- samples/tableadmin/requirements-test.txt | 2 +- samples/tableadmin/requirements.txt | 2 +- 23 files changed, 27 insertions(+), 27 deletions(-) diff --git a/.github/workflows/system_emulated.yml 
b/.github/workflows/system_emulated.yml index e1f43fd40..f1aa7e87c 100644 --- a/.github/workflows/system_emulated.yml +++ b/.github/workflows/system_emulated.yml @@ -7,7 +7,7 @@ on: jobs: run-systests: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: @@ -20,7 +20,7 @@ jobs: python-version: '3.8' - name: Setup GCloud SDK - uses: google-github-actions/setup-gcloud@v1.1.0 + uses: google-github-actions/setup-gcloud@v1.1.1 - name: Install / run Nox run: | diff --git a/samples/beam/requirements-test.txt b/samples/beam/requirements-test.txt index c4d04a08d..70613be0c 100644 --- a/samples/beam/requirements-test.txt +++ b/samples/beam/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/beam/requirements.txt b/samples/beam/requirements.txt index 8be9b98e0..9b95d0b52 100644 --- a/samples/beam/requirements.txt +++ b/samples/beam/requirements.txt @@ -1,3 +1,3 @@ apache-beam==2.46.0 google-cloud-bigtable==2.17.0 -google-cloud-core==2.3.2 +google-cloud-core==2.3.3 diff --git a/samples/hello/requirements-test.txt b/samples/hello/requirements-test.txt index c4d04a08d..70613be0c 100644 --- a/samples/hello/requirements-test.txt +++ b/samples/hello/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/hello/requirements.txt b/samples/hello/requirements.txt index 199541ffe..a76d144e6 100644 --- a/samples/hello/requirements.txt +++ b/samples/hello/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 -google-cloud-core==2.3.2 +google-cloud-bigtable==2.20.0 +google-cloud-core==2.3.3 diff --git a/samples/hello_happybase/requirements-test.txt b/samples/hello_happybase/requirements-test.txt index c4d04a08d..70613be0c 100644 --- a/samples/hello_happybase/requirements-test.txt +++ b/samples/hello_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/instanceadmin/requirements-test.txt b/samples/instanceadmin/requirements-test.txt index c4d04a08d..70613be0c 100644 --- a/samples/instanceadmin/requirements-test.txt +++ b/samples/instanceadmin/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/instanceadmin/requirements.txt b/samples/instanceadmin/requirements.txt index 04e476254..bba9ed8cf 100644 --- a/samples/instanceadmin/requirements.txt +++ b/samples/instanceadmin/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 backoff==2.2.1 diff --git a/samples/metricscaler/requirements-test.txt b/samples/metricscaler/requirements-test.txt index 761227068..d8ae088dd 100644 --- a/samples/metricscaler/requirements-test.txt +++ b/samples/metricscaler/requirements-test.txt @@ -1,3 +1,3 @@ -pytest==7.3.1 -mock==5.0.2 +pytest==7.4.0 +mock==5.1.0 google-cloud-testutils diff --git a/samples/metricscaler/requirements.txt b/samples/metricscaler/requirements.txt index 02e08b4c8..c0fce2294 100644 --- a/samples/metricscaler/requirements.txt +++ b/samples/metricscaler/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 -google-cloud-monitoring==2.14.2 +google-cloud-bigtable==2.20.0 +google-cloud-monitoring==2.15.1 diff --git a/samples/quickstart/requirements-test.txt b/samples/quickstart/requirements-test.txt index c4d04a08d..70613be0c 100644 --- a/samples/quickstart/requirements-test.txt +++ b/samples/quickstart/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/quickstart/requirements.txt b/samples/quickstart/requirements.txt index 909f8c365..83e37754e 100644 --- 
a/samples/quickstart/requirements.txt +++ b/samples/quickstart/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 diff --git a/samples/quickstart_happybase/requirements-test.txt b/samples/quickstart_happybase/requirements-test.txt index c4d04a08d..70613be0c 100644 --- a/samples/quickstart_happybase/requirements-test.txt +++ b/samples/quickstart_happybase/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/snippets/deletes/requirements-test.txt b/samples/snippets/deletes/requirements-test.txt index c4d04a08d..70613be0c 100644 --- a/samples/snippets/deletes/requirements-test.txt +++ b/samples/snippets/deletes/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/snippets/deletes/requirements.txt b/samples/snippets/deletes/requirements.txt index 200665631..85b4e786f 100644 --- a/samples/snippets/deletes/requirements.txt +++ b/samples/snippets/deletes/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/samples/snippets/filters/requirements-test.txt b/samples/snippets/filters/requirements-test.txt index c4d04a08d..70613be0c 100644 --- a/samples/snippets/filters/requirements-test.txt +++ b/samples/snippets/filters/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/snippets/filters/requirements.txt b/samples/snippets/filters/requirements.txt index 200665631..85b4e786f 100644 --- a/samples/snippets/filters/requirements.txt +++ b/samples/snippets/filters/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/samples/snippets/reads/requirements-test.txt b/samples/snippets/reads/requirements-test.txt index c4d04a08d..70613be0c 100644 --- a/samples/snippets/reads/requirements-test.txt +++ b/samples/snippets/reads/requirements-test.txt @@ -1 +1 @@ -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/snippets/reads/requirements.txt b/samples/snippets/reads/requirements.txt index 200665631..85b4e786f 100644 --- a/samples/snippets/reads/requirements.txt +++ b/samples/snippets/reads/requirements.txt @@ -1,2 +1,2 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 snapshottest==0.6.0 \ No newline at end of file diff --git a/samples/snippets/writes/requirements-test.txt b/samples/snippets/writes/requirements-test.txt index 96aa71dab..cbd0a47de 100644 --- a/samples/snippets/writes/requirements-test.txt +++ b/samples/snippets/writes/requirements-test.txt @@ -1,2 +1,2 @@ backoff==2.2.1 -pytest==7.3.1 +pytest==7.4.0 diff --git a/samples/snippets/writes/requirements.txt b/samples/snippets/writes/requirements.txt index 32cead029..90fa5577c 100644 --- a/samples/snippets/writes/requirements.txt +++ b/samples/snippets/writes/requirements.txt @@ -1 +1 @@ -google-cloud-bigtable==2.17.0 \ No newline at end of file +google-cloud-bigtable==2.20.0 \ No newline at end of file diff --git a/samples/tableadmin/requirements-test.txt b/samples/tableadmin/requirements-test.txt index ca1f33bd3..b4ead9993 100644 --- a/samples/tableadmin/requirements-test.txt +++ b/samples/tableadmin/requirements-test.txt @@ -1,2 +1,2 @@ -pytest==7.3.1 +pytest==7.4.0 google-cloud-testutils==1.3.3 diff --git a/samples/tableadmin/requirements.txt b/samples/tableadmin/requirements.txt index 909f8c365..83e37754e 100644 --- a/samples/tableadmin/requirements.txt +++ b/samples/tableadmin/requirements.txt @@ 
-1 +1 @@ -google-cloud-bigtable==2.17.0 +google-cloud-bigtable==2.20.0 From 4f050aa5aed9a9dcf209779d5c10e5de8e2ff19e Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Tue, 12 Dec 2023 21:25:35 +0000 Subject: [PATCH 50/52] feat: Add support for Python 3.12 (#888) --- .github/.OwlBot.lock.yaml | 4 +- .github/workflows/unittest.yml | 2 +- .kokoro/samples/python3.12/common.cfg | 40 ++++++++++++++++++++ .kokoro/samples/python3.12/continuous.cfg | 6 +++ .kokoro/samples/python3.12/periodic-head.cfg | 11 ++++++ .kokoro/samples/python3.12/periodic.cfg | 6 +++ .kokoro/samples/python3.12/presubmit.cfg | 6 +++ CONTRIBUTING.rst | 6 ++- noxfile.py | 2 +- samples/beam/noxfile.py | 2 +- samples/hello/noxfile.py | 2 +- samples/hello_happybase/noxfile.py | 2 +- samples/instanceadmin/noxfile.py | 2 +- samples/metricscaler/noxfile.py | 2 +- samples/quickstart/noxfile.py | 2 +- samples/quickstart_happybase/noxfile.py | 2 +- samples/snippets/deletes/noxfile.py | 2 +- samples/snippets/filters/noxfile.py | 2 +- samples/snippets/reads/noxfile.py | 2 +- samples/snippets/writes/noxfile.py | 2 +- samples/tableadmin/noxfile.py | 2 +- setup.py | 1 + testing/constraints-3.12.txt | 0 tests/unit/test_table.py | 8 ++-- 24 files changed, 94 insertions(+), 22 deletions(-) create mode 100644 .kokoro/samples/python3.12/common.cfg create mode 100644 .kokoro/samples/python3.12/continuous.cfg create mode 100644 .kokoro/samples/python3.12/periodic-head.cfg create mode 100644 .kokoro/samples/python3.12/periodic.cfg create mode 100644 .kokoro/samples/python3.12/presubmit.cfg create mode 100644 testing/constraints-3.12.txt diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 453b540c1..eb4d9f794 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,5 +13,5 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:caffe0a9277daeccc4d1de5c9b55ebba0901b57c2f713ec9c876b0d4ec064f61 -# created: 2023-11-08T19:46:45.022803742Z + digest: sha256:bacc3af03bff793a03add584537b36b5644342931ad989e3ba1171d3bd5399f5 +# created: 2023-11-23T18:17:28.105124211Z diff --git a/.github/workflows/unittest.yml b/.github/workflows/unittest.yml index 8057a7691..a32027b49 100644 --- a/.github/workflows/unittest.yml +++ b/.github/workflows/unittest.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest strategy: matrix: - python: ['3.7', '3.8', '3.9', '3.10', '3.11'] + python: ['3.7', '3.8', '3.9', '3.10', '3.11', '3.12'] steps: - name: Checkout uses: actions/checkout@v3 diff --git a/.kokoro/samples/python3.12/common.cfg b/.kokoro/samples/python3.12/common.cfg new file mode 100644 index 000000000..34e0a95f3 --- /dev/null +++ b/.kokoro/samples/python3.12/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.12" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "python-docs-samples-tests-312" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "python-bigtable/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.12/continuous.cfg b/.kokoro/samples/python3.12/continuous.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.12/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.12/periodic-head.cfg b/.kokoro/samples/python3.12/periodic-head.cfg new file mode 100644 index 000000000..be25a34f9 --- /dev/null +++ b/.kokoro/samples/python3.12/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-bigtable/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.12/periodic.cfg b/.kokoro/samples/python3.12/periodic.cfg new file mode 100644 index 000000000..71cd1e597 --- /dev/null +++ b/.kokoro/samples/python3.12/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/.kokoro/samples/python3.12/presubmit.cfg b/.kokoro/samples/python3.12/presubmit.cfg new file mode 100644 index 000000000..a1c8d9759 --- /dev/null +++ b/.kokoro/samples/python3.12/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 504fb3742..947c129b7 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -22,7 +22,7 @@ In order to add a feature: documentation. - The feature must work fully on the following CPython versions: - 3.7, 3.8, 3.9, 3.10 and 3.11 on both UNIX and Windows. + 3.7, 3.8, 3.9, 3.10, 3.11 and 3.12 on both UNIX and Windows. - The feature must not add unnecessary dependencies (where "unnecessary" is of course subjective, but new dependencies should @@ -72,7 +72,7 @@ We use `nox `__ to instrument our tests. - To run a single unit test:: - $ nox -s unit-3.11 -- -k + $ nox -s unit-3.12 -- -k .. note:: @@ -226,12 +226,14 @@ We support: - `Python 3.9`_ - `Python 3.10`_ - `Python 3.11`_ +- `Python 3.12`_ .. _Python 3.7: https://docs.python.org/3.7/ .. _Python 3.8: https://docs.python.org/3.8/ .. _Python 3.9: https://docs.python.org/3.9/ .. _Python 3.10: https://docs.python.org/3.10/ .. _Python 3.11: https://docs.python.org/3.11/ +.. _Python 3.12: https://docs.python.org/3.12/ Supported versions can be found in our ``noxfile.py`` `config`_. 
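The noxfile edits that follow drive the new coverage: nox generates one unit-test session per entry in the version list, so appending "3.12" is what surfaces a ``unit-3.12`` session (the one CONTRIBUTING.rst now references). A minimal sketch of that mechanism — the session body here is illustrative, not the repository's actual noxfile contents:

    import nox

    # All interpreters the unit sessions should cover.
    UNIT_TEST_PYTHON_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]

    @nox.session(python=UNIT_TEST_PYTHON_VERSIONS)
    def unit(session):
        # nox creates one virtualenv-backed session per listed interpreter,
        # so adding "3.12" above is enough to expose ``nox -s unit-3.12``.
        session.install("pytest")
        session.run("pytest", "tests/unit")
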
diff --git a/noxfile.py b/noxfile.py index 63a169cbb..a6fb7d6f3 100644 --- a/noxfile.py +++ b/noxfile.py @@ -34,7 +34,7 @@ DEFAULT_PYTHON_VERSION = "3.8" -UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11"] +UNIT_TEST_PYTHON_VERSIONS: List[str] = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] UNIT_TEST_STANDARD_DEPENDENCIES = [ "mock", "asyncmock", diff --git a/samples/beam/noxfile.py b/samples/beam/noxfile.py index 3d4395024..80ffdb178 100644 --- a/samples/beam/noxfile.py +++ b/samples/beam/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/hello/noxfile.py b/samples/hello/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/hello/noxfile.py +++ b/samples/hello/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/hello_happybase/noxfile.py b/samples/hello_happybase/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/hello_happybase/noxfile.py +++ b/samples/hello_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/instanceadmin/noxfile.py b/samples/instanceadmin/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/instanceadmin/noxfile.py +++ b/samples/instanceadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/metricscaler/noxfile.py b/samples/metricscaler/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/metricscaler/noxfile.py +++ b/samples/metricscaler/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/quickstart/noxfile.py b/samples/quickstart/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/quickstart/noxfile.py +++ b/samples/quickstart/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/quickstart_happybase/noxfile.py b/samples/quickstart_happybase/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/quickstart_happybase/noxfile.py +++ b/samples/quickstart_happybase/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/deletes/noxfile.py b/samples/snippets/deletes/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/snippets/deletes/noxfile.py +++ b/samples/snippets/deletes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/filters/noxfile.py b/samples/snippets/filters/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/snippets/filters/noxfile.py +++ b/samples/snippets/filters/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/reads/noxfile.py b/samples/snippets/reads/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/snippets/reads/noxfile.py +++ b/samples/snippets/reads/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/snippets/writes/noxfile.py b/samples/snippets/writes/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/snippets/writes/noxfile.py +++ b/samples/snippets/writes/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/samples/tableadmin/noxfile.py b/samples/tableadmin/noxfile.py index 7c8a63994..483b55901 100644 --- a/samples/tableadmin/noxfile.py +++ b/samples/tableadmin/noxfile.py @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11", "3.12"] # Any default versions that should be ignored. 
IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] diff --git a/setup.py b/setup.py index 617ec77dd..e9bce0960 100644 --- a/setup.py +++ b/setup.py @@ -82,6 +82,7 @@ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "Operating System :: OS Independent", "Topic :: Internet", ], diff --git a/testing/constraints-3.12.txt b/testing/constraints-3.12.txt new file mode 100644 index 000000000..e69de29bb diff --git a/tests/unit/test_table.py b/tests/unit/test_table.py index f2dc14485..032363bd7 100644 --- a/tests/unit/test_table.py +++ b/tests/unit/test_table.py @@ -1067,8 +1067,8 @@ def test_table_yield_retry_rows(): for row in table.yield_rows(start_key=ROW_KEY_1, end_key=ROW_KEY_2): rows.append(row) - assert len(warned) == 1 - assert warned[0].category is DeprecationWarning + assert len(warned) >= 1 + assert DeprecationWarning in [w.category for w in warned] result = rows[1] assert result.row_key == ROW_KEY_2 @@ -1140,8 +1140,8 @@ def test_table_yield_rows_with_row_set(): for row in table.yield_rows(row_set=row_set): rows.append(row) - assert len(warned) == 1 - assert warned[0].category is DeprecationWarning + assert len(warned) >= 1 + assert DeprecationWarning in [w.category for w in warned] assert rows[0].row_key == ROW_KEY_1 assert rows[1].row_key == ROW_KEY_2 From fe58f617c7364d7e99e2ec50abd5f080852bf033 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Tue, 12 Dec 2023 13:58:06 -0800 Subject: [PATCH 51/52] fix: mutations batcher race condition (#896) --- google/cloud/bigtable/batcher.py | 118 ++++++++++++++++++------------- tests/system/conftest.py | 2 +- tests/system/test_data_api.py | 36 ++++++++++ 3 files changed, 104 insertions(+), 52 deletions(-) diff --git a/google/cloud/bigtable/batcher.py b/google/cloud/bigtable/batcher.py index a6eb806e9..8f0cabadd 100644 --- a/google/cloud/bigtable/batcher.py +++ b/google/cloud/bigtable/batcher.py @@ -53,12 +53,19 @@ def __init__(self, max_mutation_bytes=MAX_MUTATION_SIZE, flush_count=FLUSH_COUNT self.flush_count = flush_count def get(self): - """Retrieve an item from the queue. Recalculate queue size.""" - row = self._queue.get() - mutation_size = row.get_mutations_size() - self.total_mutation_count -= len(row._get_mutations()) - self.total_size -= mutation_size - return row + """ + Retrieve an item from the queue. Recalculate queue size. + + If the queue is empty, return None. + """ + try: + row = self._queue.get_nowait() + mutation_size = row.get_mutations_size() + self.total_mutation_count -= len(row._get_mutations()) + self.total_size -= mutation_size + return row + except queue.Empty: + return None def put(self, item): """Insert an item to the queue. Recalculate queue size.""" @@ -79,9 +86,6 @@ def full(self): return True return False - def empty(self): - return self._queue.empty() - @dataclass class _BatchInfo: @@ -292,8 +296,10 @@ def flush(self): * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. """ rows_to_flush = [] - while not self._rows.empty(): - rows_to_flush.append(self._rows.get()) + row = self._rows.get() + while row is not None: + rows_to_flush.append(row) + row = self._rows.get() response = self._flush_rows(rows_to_flush) return response @@ -303,58 +309,68 @@ def _flush_async(self): :raises: * :exc:`.batcherMutationsBatchError` if there's any error in the mutations. 
""" - - rows_to_flush = [] - mutations_count = 0 - mutations_size = 0 - rows_count = 0 - batch_info = _BatchInfo() - - while not self._rows.empty(): - row = self._rows.get() - mutations_count += len(row._get_mutations()) - mutations_size += row.get_mutations_size() - rows_count += 1 - rows_to_flush.append(row) - batch_info.mutations_count = mutations_count - batch_info.rows_count = rows_count - batch_info.mutations_size = mutations_size - - if ( - rows_count >= self.flush_count - or mutations_size >= self.max_row_bytes - or mutations_count >= self.flow_control.max_mutations - or mutations_size >= self.flow_control.max_mutation_bytes - or self._rows.empty() # submit when it reached the end of the queue + next_row = self._rows.get() + while next_row is not None: + # start a new batch + rows_to_flush = [next_row] + batch_info = _BatchInfo( + mutations_count=len(next_row._get_mutations()), + rows_count=1, + mutations_size=next_row.get_mutations_size(), + ) + # fill up batch with rows + next_row = self._rows.get() + while next_row is not None and self._row_fits_in_batch( + next_row, batch_info ): - # wait for resources to become available, before submitting any new batch - self.flow_control.wait() - # once unblocked, submit a batch - # event flag will be set by control_flow to block subsequent thread, but not blocking this one - self.flow_control.control_flow(batch_info) - future = self._executor.submit(self._flush_rows, rows_to_flush) - self.futures_mapping[future] = batch_info - future.add_done_callback(self._batch_completed_callback) - - # reset and start a new batch - rows_to_flush = [] - mutations_size = 0 - rows_count = 0 - mutations_count = 0 - batch_info = _BatchInfo() + rows_to_flush.append(next_row) + batch_info.mutations_count += len(next_row._get_mutations()) + batch_info.rows_count += 1 + batch_info.mutations_size += next_row.get_mutations_size() + next_row = self._rows.get() + # send batch over network + # wait for resources to become available + self.flow_control.wait() + # once unblocked, submit the batch + # event flag will be set by control_flow to block subsequent thread, but not blocking this one + self.flow_control.control_flow(batch_info) + future = self._executor.submit(self._flush_rows, rows_to_flush) + # schedule release of resources from flow control + self.futures_mapping[future] = batch_info + future.add_done_callback(self._batch_completed_callback) def _batch_completed_callback(self, future): """Callback for when the mutation has finished to clean up the current batch and release items from the flow controller. - Raise exceptions if there's any. Release the resources locked by the flow control and allow enqueued tasks to be run. """ - processed_rows = self.futures_mapping[future] self.flow_control.release(processed_rows) del self.futures_mapping[future] + def _row_fits_in_batch(self, row, batch_info): + """Checks if a row can fit in the current batch. + + :type row: class + :param row: :class:`~google.cloud.bigtable.row.DirectRow`. + + :type batch_info: :class:`_BatchInfo` + :param batch_info: Information about the current batch. + + :rtype: bool + :returns: True if the row can fit in the current batch. 
+ """ + new_rows_count = batch_info.rows_count + 1 + new_mutations_count = batch_info.mutations_count + len(row._get_mutations()) + new_mutations_size = batch_info.mutations_size + row.get_mutations_size() + return ( + new_rows_count <= self.flush_count + and new_mutations_size <= self.max_row_bytes + and new_mutations_count <= self.flow_control.max_mutations + and new_mutations_size <= self.flow_control.max_mutation_bytes + ) + def _flush_rows(self, rows_to_flush): """Mutate the specified rows. diff --git a/tests/system/conftest.py b/tests/system/conftest.py index f39fcba88..910c20970 100644 --- a/tests/system/conftest.py +++ b/tests/system/conftest.py @@ -58,7 +58,7 @@ def location_id(): @pytest.fixture(scope="session") def serve_nodes(): - return 3 + return 1 @pytest.fixture(scope="session") diff --git a/tests/system/test_data_api.py b/tests/system/test_data_api.py index 2ca7e1504..579837e34 100644 --- a/tests/system/test_data_api.py +++ b/tests/system/test_data_api.py @@ -381,3 +381,39 @@ def test_access_with_non_admin_client(data_client, data_instance_id, data_table_ instance = data_client.instance(data_instance_id) table = instance.table(data_table_id) assert table.read_row("nonesuch") is None # no raise + + +def test_mutations_batcher_threading(data_table, rows_to_delete): + """ + Test the mutations batcher by sending a bunch of mutations using different + flush methods + """ + import mock + import time + from google.cloud.bigtable.batcher import MutationsBatcher + + num_sent = 20 + all_results = [] + + def callback(results): + all_results.extend(results) + + # override flow control max elements + with mock.patch("google.cloud.bigtable.batcher.MAX_OUTSTANDING_ELEMENTS", 2): + with MutationsBatcher( + data_table, + flush_count=5, + flush_interval=0.07, + batch_completed_callback=callback, + ) as batcher: + # send mutations in a way that timed flushes and count flushes interleave + for i in range(num_sent): + row = data_table.direct_row("row{}".format(i)) + row.set_cell( + COLUMN_FAMILY_ID1, COL_NAME1, "val{}".format(i).encode("utf-8") + ) + rows_to_delete.append(row) + batcher.mutate(row) + time.sleep(0.01) + # ensure all mutations were sent + assert len(all_results) == num_sent From e4e63c7b5b91273b3aae04fda59cc5a21c848de2 Mon Sep 17 00:00:00 2001 From: Daniel Sanche Date: Tue, 12 Dec 2023 14:24:57 -0800 Subject: [PATCH 52/52] fix: add lock to flow control (#899) --- google/cloud/bigtable/batcher.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/google/cloud/bigtable/batcher.py b/google/cloud/bigtable/batcher.py index 8f0cabadd..f9b85386d 100644 --- a/google/cloud/bigtable/batcher.py +++ b/google/cloud/bigtable/batcher.py @@ -114,6 +114,7 @@ def __init__( self.inflight_size = 0 self.event = threading.Event() self.event.set() + self._lock = threading.Lock() def is_blocked(self): """Returns True if: @@ -132,8 +133,9 @@ def control_flow(self, batch_info): Calculate the resources used by this batch """ - self.inflight_mutations += batch_info.mutations_count - self.inflight_size += batch_info.mutations_size + with self._lock: + self.inflight_mutations += batch_info.mutations_count + self.inflight_size += batch_info.mutations_size self.set_flow_control_status() def wait(self): @@ -158,8 +160,9 @@ def release(self, batch_info): Release the resources. Decrement the row size to allow enqueued mutations to be run. 
""" - self.inflight_mutations -= batch_info.mutations_count - self.inflight_size -= batch_info.mutations_size + with self._lock: + self.inflight_mutations -= batch_info.mutations_count + self.inflight_size -= batch_info.mutations_size self.set_flow_control_status()