From 4105df762f1318c49bba030063897f0c50e4daee Mon Sep 17 00:00:00 2001 From: "gcf-owl-bot[bot]" <78513119+gcf-owl-bot[bot]@users.noreply.github.com> Date: Wed, 23 Aug 2023 15:53:15 -0700 Subject: [PATCH] feat: publish CopyBackup protos to external customers (#855) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat: publish CopyBackup protos to external customers PiperOrigin-RevId: 557192020 Source-Link: https://github.com/googleapis/googleapis/commit/b4c238feaa1097c53798ed77035bbfeb7fc72e96 Source-Link: https://github.com/googleapis/googleapis-gen/commit/feccb30e3177da8b7b7e68149ca4bb914f8faf2a Copy-Tag: eyJwIjoiLmdpdGh1Yi8uT3dsQm90LnlhbWwiLCJoIjoiZmVjY2IzMGUzMTc3ZGE4YjdiN2U2ODE0OWNhNGJiOTE0ZjhmYWYyYSJ9 * 🦉 Updates from OwlBot post-processor See https://github.com/googleapis/repo-automation-bots/blob/main/packages/owl-bot/README.md --------- Co-authored-by: Owl Bot --- google/cloud/bigtable_admin/__init__.py | 4 + google/cloud/bigtable_admin_v2/__init__.py | 4 + .../bigtable_admin_v2/gapic_metadata.json | 15 + .../bigtable_table_admin/async_client.py | 142 ++++- .../services/bigtable_table_admin/client.py | 142 ++++- .../bigtable_table_admin/transports/base.py | 14 + .../bigtable_table_admin/transports/grpc.py | 33 +- .../transports/grpc_asyncio.py | 35 +- .../bigtable_table_admin/transports/rest.py | 136 +++++ .../cloud/bigtable_admin_v2/types/__init__.py | 4 + .../types/bigtable_table_admin.py | 104 +++- google/cloud/bigtable_admin_v2/types/table.py | 56 +- scripts/fixup_bigtable_admin_v2_keywords.py | 1 + .../test_bigtable_table_admin.py | 568 ++++++++++++++++++ 14 files changed, 1221 insertions(+), 37 deletions(-) diff --git a/google/cloud/bigtable_admin/__init__.py b/google/cloud/bigtable_admin/__init__.py index 43535ae20..d26d79b3c 100644 --- a/google/cloud/bigtable_admin/__init__.py +++ b/google/cloud/bigtable_admin/__init__.py @@ -115,6 +115,8 @@ from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CheckConsistencyResponse, ) +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata +from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ( CreateBackupMetadata, ) @@ -242,6 +244,8 @@ "UpdateInstanceMetadata", "CheckConsistencyRequest", "CheckConsistencyResponse", + "CopyBackupMetadata", + "CopyBackupRequest", "CreateBackupMetadata", "CreateBackupRequest", "CreateTableFromSnapshotMetadata", diff --git a/google/cloud/bigtable_admin_v2/__init__.py b/google/cloud/bigtable_admin_v2/__init__.py index 8033a0af7..811b956e0 100644 --- a/google/cloud/bigtable_admin_v2/__init__.py +++ b/google/cloud/bigtable_admin_v2/__init__.py @@ -51,6 +51,8 @@ from .types.bigtable_instance_admin import UpdateInstanceMetadata from .types.bigtable_table_admin import CheckConsistencyRequest from .types.bigtable_table_admin import CheckConsistencyResponse +from .types.bigtable_table_admin import CopyBackupMetadata +from .types.bigtable_table_admin import CopyBackupRequest from .types.bigtable_table_admin import CreateBackupMetadata from .types.bigtable_table_admin import CreateBackupRequest from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata @@ -116,6 +118,8 @@ "CheckConsistencyResponse", "Cluster", "ColumnFamily", + "CopyBackupMetadata", + "CopyBackupRequest", "CreateAppProfileRequest", "CreateBackupMetadata", "CreateBackupRequest", diff --git 
a/google/cloud/bigtable_admin_v2/gapic_metadata.json b/google/cloud/bigtable_admin_v2/gapic_metadata.json index d797338cc..9b3426470 100644 --- a/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ b/google/cloud/bigtable_admin_v2/gapic_metadata.json @@ -349,6 +349,11 @@ "check_consistency" ] }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, "CreateBackup": { "methods": [ "create_backup" @@ -474,6 +479,11 @@ "check_consistency" ] }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, "CreateBackup": { "methods": [ "create_backup" @@ -599,6 +609,11 @@ "check_consistency" ] }, + "CopyBackup": { + "methods": [ + "copy_backup" + ] + }, "CreateBackup": { "methods": [ "create_backup" diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 31ddfac72..d1c1811a6 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -2143,7 +2143,7 @@ async def list_backups( Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. Iterating over this object will yield results and resolve additional pages automatically. @@ -2218,9 +2218,8 @@ async def restore_table( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation_async.AsyncOperation: - r"""Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + r"""Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -2283,6 +2282,141 @@ async def restore_table( # Done; return the response. return response + async def copy_backup( + self, + request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, + *, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + source_backup: Optional[str] = None, + expire_time: Optional[timestamp_pb2.Timestamp] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation_async.AsyncOperation: + r"""Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Args: + request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]): + The request object. The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + parent (:class:`str`): + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already + exists. Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (:class:`str`): + Required. The id of the new backup. 
The ``backup_id`` + along with ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup + name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_backup (:class:`str`): + Required. The source backup to be copied from. The + source backup needs to be in READY state for it to be + copied. Copying a copied backup is not allowed. Once + CopyBackup is in progress, the source backup cannot be + deleted or cleaned up on expiration until CopyBackup is + finished. Values are of the form: + ``projects//instances//clusters//backups/``. + + This corresponds to the ``source_backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): + Required. Required. The expiration time of the copied + backup with microsecond granularity that must be at + least 6 hours and at most 30 days from the time the + request is received. Once the ``expire_time`` has + passed, Cloud Bigtable will delete the backup and free + the resources used by the backup. + + This corresponds to the ``expire_time`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation_async.AsyncOperation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + request = bigtable_table_admin.CopyBackupRequest(request) + + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if source_backup is not None: + request.source_backup = source_backup + if expire_time is not None: + request.expire_time = expire_time + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = gapic_v1.method_async.wrap_method( + self._client._transport.copy_backup, + default_timeout=None, + client_info=DEFAULT_CLIENT_INFO, + ) + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = await rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. 
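+        # operation_async.from_gapic wraps the raw operations_pb2.Operation in an
+        # AsyncOperation future: awaiting response.result() yields a table.Backup
+        # once the copy completes, and response.metadata carries the parsed
+        # CopyBackupMetadata while the long-running operation is in flight.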
+ response = operation_async.from_gapic( + response, + self._client._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CopyBackupMetadata, + ) + + # Done; return the response. + return response + async def get_iam_policy( self, request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index b7127c032..80231fce9 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -2407,7 +2407,7 @@ def list_backups( Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. + [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. Iterating over this object will yield results and resolve additional pages automatically. @@ -2472,9 +2472,8 @@ def restore_table( timeout: Union[float, object] = gapic_v1.method.DEFAULT, metadata: Sequence[Tuple[str, str]] = (), ) -> operation.Operation: - r"""Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + r"""Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -2538,6 +2537,141 @@ def restore_table( # Done; return the response. return response + def copy_backup( + self, + request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, + *, + parent: Optional[str] = None, + backup_id: Optional[str] = None, + source_backup: Optional[str] = None, + expire_time: Optional[timestamp_pb2.Timestamp] = None, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Union[float, object] = gapic_v1.method.DEFAULT, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operation.Operation: + r"""Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Args: + request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]): + The request object. The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + parent (str): + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already + exists. Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + + This corresponds to the ``parent`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + backup_id (str): + Required. The id of the new backup. The ``backup_id`` + along with ``parent`` are combined as + {parent}/backups/{backup_id} to create the full backup + name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in + length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + + This corresponds to the ``backup_id`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + source_backup (str): + Required. The source backup to be copied from. 
The + source backup needs to be in READY state for it to be + copied. Copying a copied backup is not allowed. Once + CopyBackup is in progress, the source backup cannot be + deleted or cleaned up on expiration until CopyBackup is + finished. Values are of the form: + ``projects//instances//clusters//backups/``. + + This corresponds to the ``source_backup`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Required. Required. The expiration time of the copied + backup with microsecond granularity that must be at + least 6 hours and at most 30 days from the time the + request is received. Once the ``expire_time`` has + passed, Cloud Bigtable will delete the backup and free + the resources used by the backup. + + This corresponds to the ``expire_time`` field + on the ``request`` instance; if ``request`` is provided, this + should not be set. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + google.api_core.operation.Operation: + An object representing a long-running operation. + + The result type for the operation will be + :class:`google.cloud.bigtable_admin_v2.types.Backup` A + backup of a Cloud Bigtable table. + + """ + # Create or coerce a protobuf request object. + # Quick check: If we got a request object, we should *not* have + # gotten any keyword arguments that map to the request. + has_flattened_params = any([parent, backup_id, source_backup, expire_time]) + if request is not None and has_flattened_params: + raise ValueError( + "If the `request` argument is set, then none of " + "the individual field arguments should be set." + ) + + # Minor optimization to avoid making a copy if the user passes + # in a bigtable_table_admin.CopyBackupRequest. + # There's no risk of modifying the input as we've already verified + # there are no flattened fields. + if not isinstance(request, bigtable_table_admin.CopyBackupRequest): + request = bigtable_table_admin.CopyBackupRequest(request) + # If we have keyword arguments corresponding to fields on the + # request, apply these. + if parent is not None: + request.parent = parent + if backup_id is not None: + request.backup_id = backup_id + if source_backup is not None: + request.source_backup = source_backup + if expire_time is not None: + request.expire_time = expire_time + + # Wrap the RPC method; this adds retry and timeout information, + # and friendly error handling. + rpc = self._transport._wrapped_methods[self._transport.copy_backup] + + # Certain fields should be provided within the metadata header; + # add these here. + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), + ) + + # Send the request. + response = rpc( + request, + retry=retry, + timeout=timeout, + metadata=metadata, + ) + + # Wrap the response in an operation future. + response = operation.from_gapic( + response, + self._transport.operations_client, + table.Backup, + metadata_type=bigtable_table_admin.CopyBackupMetadata, + ) + + # Done; return the response. 
+ return response + def get_iam_policy( self, request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index 591c6bcfe..c3cf01a96 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -322,6 +322,11 @@ def _prep_wrapped_messages(self, client_info): default_timeout=60.0, client_info=client_info, ), + self.copy_backup: gapic_v1.method.wrap_method( + self.copy_backup, + default_timeout=None, + client_info=client_info, + ), self.get_iam_policy: gapic_v1.method.wrap_method( self.get_iam_policy, default_retry=retries.Retry( @@ -577,6 +582,15 @@ def restore_table( ]: raise NotImplementedError() + @property + def copy_backup( + self, + ) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], + Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]], + ]: + raise NotImplementedError() + @property def get_iam_policy( self, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index e18be126c..34a1596fc 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -866,9 +866,8 @@ def restore_table( ) -> Callable[[bigtable_table_admin.RestoreTableRequest], operations_pb2.Operation]: r"""Return a callable for the restore table method over gRPC. - Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -894,6 +893,34 @@ def restore_table( ) return self._stubs["restore_table"] + @property + def copy_backup( + self, + ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]: + r"""Return a callable for the copy backup method over gRPC. + + Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Returns: + Callable[[~.CopyBackupRequest], + ~.Operation]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
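+        # The callable is created once on first access and memoized in
+        # self._stubs, so every later read of this property reuses the same
+        # unary-unary stub over the shared gRPC channel.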
+ if "copy_backup" not in self._stubs: + self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", + request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["copy_backup"] + @property def get_iam_policy( self, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 8f72e3fe8..b19d4e7be 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -890,9 +890,8 @@ def restore_table( ]: r"""Return a callable for the restore table method over gRPC. - Create a new table by restoring from a completed backup. The new - table must be in the same project as the instance containing the - backup. The returned table [long-running + Create a new table by restoring from a completed backup. The + returned table [long-running operation][google.longrunning.Operation] can be used to track the progress of the operation, and to cancel it. The [metadata][google.longrunning.Operation.metadata] field type is @@ -918,6 +917,36 @@ def restore_table( ) return self._stubs["restore_table"] + @property + def copy_backup( + self, + ) -> Callable[ + [bigtable_table_admin.CopyBackupRequest], Awaitable[operations_pb2.Operation] + ]: + r"""Return a callable for the copy backup method over gRPC. + + Copy a Cloud Bigtable backup to a new backup in the + destination cluster located in the destination instance + and project. + + Returns: + Callable[[~.CopyBackupRequest], + Awaitable[~.Operation]]: + A function that, when called, will call the underlying RPC + on the server. + """ + # Generate a "stub function" on-the-fly which will actually make + # the request. + # gRPC handles serialization and deserialization, so we just need + # to pass in the functions for each. 
+ if "copy_backup" not in self._stubs: + self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", + request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, + response_deserializer=operations_pb2.Operation.FromString, + ) + return self._stubs["copy_backup"] + @property def get_iam_policy( self, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 37518c8da..4b3a846dc 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -84,6 +84,14 @@ def post_check_consistency(self, response): logging.log(f"Received response: {response}") return response + def pre_copy_backup(self, request, metadata): + logging.log(f"Received request: {request}") + return request, metadata + + def post_copy_backup(self, response): + logging.log(f"Received response: {response}") + return response + def pre_create_backup(self, request, metadata): logging.log(f"Received request: {request}") return request, metadata @@ -281,6 +289,29 @@ def post_check_consistency( """ return response + def pre_copy_backup( + self, + request: bigtable_table_admin.CopyBackupRequest, + metadata: Sequence[Tuple[str, str]], + ) -> Tuple[bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, str]]]: + """Pre-rpc interceptor for copy_backup + + Override in a subclass to manipulate the request or metadata + before they are sent to the BigtableTableAdmin server. + """ + return request, metadata + + def post_copy_backup( + self, response: operations_pb2.Operation + ) -> operations_pb2.Operation: + """Post-rpc interceptor for copy_backup + + Override in a subclass to manipulate the response + after it is returned by the BigtableTableAdmin server but before + it is returned to user code. + """ + return response + def pre_create_backup( self, request: bigtable_table_admin.CreateBackupRequest, @@ -1010,6 +1041,103 @@ def __call__( resp = self._interceptor.post_check_consistency(resp) return resp + class _CopyBackup(BigtableTableAdminRestStub): + def __hash__(self): + return hash("CopyBackup") + + __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = {} + + @classmethod + def _get_unset_required_fields(cls, message_dict): + return { + k: v + for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() + if k not in message_dict + } + + def __call__( + self, + request: bigtable_table_admin.CopyBackupRequest, + *, + retry: OptionalRetry = gapic_v1.method.DEFAULT, + timeout: Optional[float] = None, + metadata: Sequence[Tuple[str, str]] = (), + ) -> operations_pb2.Operation: + r"""Call the copy backup method over HTTP. + + Args: + request (~.bigtable_table_admin.CopyBackupRequest): + The request object. The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + retry (google.api_core.retry.Retry): Designation of what errors, if any, + should be retried. + timeout (float): The timeout for this request. + metadata (Sequence[Tuple[str, str]]): Strings which should be + sent along with the request as metadata. + + Returns: + ~.operations_pb2.Operation: + This resource represents a + long-running operation that is the + result of a network API call. 
+ + """ + + http_options: List[Dict[str, str]] = [ + { + "method": "post", + "uri": "/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy", + "body": "*", + }, + ] + request, metadata = self._interceptor.pre_copy_backup(request, metadata) + pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) + transcoded_request = path_template.transcode(http_options, pb_request) + + # Jsonify the request body + + body = json_format.MessageToJson( + transcoded_request["body"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + uri = transcoded_request["uri"] + method = transcoded_request["method"] + + # Jsonify the query params + query_params = json.loads( + json_format.MessageToJson( + transcoded_request["query_params"], + including_default_value_fields=False, + use_integers_for_enums=True, + ) + ) + query_params.update(self._get_unset_required_fields(query_params)) + + query_params["$alt"] = "json;enum-encoding=int" + + # Send the request + headers = dict(metadata) + headers["Content-Type"] = "application/json" + response = getattr(self._session, method)( + "{host}{uri}".format(host=self._host, uri=uri), + timeout=timeout, + headers=headers, + params=rest_helpers.flatten_query_params(query_params, strict=True), + data=body, + ) + + # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception + # subclass. + if response.status_code >= 400: + raise core_exceptions.from_http_response(response) + + # Return the response + resp = operations_pb2.Operation() + json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_copy_backup(resp) + return resp + class _CreateBackup(BigtableTableAdminRestStub): def __hash__(self): return hash("CreateBackup") @@ -3360,6 +3488,14 @@ def check_consistency( # In C++ this would require a dynamic_cast return self._CheckConsistency(self._session, self._host, self._interceptor) # type: ignore + @property + def copy_backup( + self, + ) -> Callable[[bigtable_table_admin.CopyBackupRequest], operations_pb2.Operation]: + # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
+ # In C++ this would require a dynamic_cast + return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore + @property def create_backup( self, diff --git a/google/cloud/bigtable_admin_v2/types/__init__.py b/google/cloud/bigtable_admin_v2/types/__init__.py index c69e3129b..a2fefffc8 100644 --- a/google/cloud/bigtable_admin_v2/types/__init__.py +++ b/google/cloud/bigtable_admin_v2/types/__init__.py @@ -44,6 +44,8 @@ from .bigtable_table_admin import ( CheckConsistencyRequest, CheckConsistencyResponse, + CopyBackupMetadata, + CopyBackupRequest, CreateBackupMetadata, CreateBackupRequest, CreateTableFromSnapshotMetadata, @@ -130,6 +132,8 @@ "UpdateInstanceMetadata", "CheckConsistencyRequest", "CheckConsistencyResponse", + "CopyBackupMetadata", + "CopyBackupRequest", "CreateBackupMetadata", "CreateBackupRequest", "CreateTableFromSnapshotMetadata", diff --git a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py index dfa815dc2..2b108351a 100644 --- a/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ b/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py @@ -62,6 +62,8 @@ "DeleteBackupRequest", "ListBackupsRequest", "ListBackupsResponse", + "CopyBackupRequest", + "CopyBackupMetadata", }, ) @@ -76,8 +78,7 @@ class RestoreTableRequest(proto.Message): Attributes: parent (str): Required. The name of the instance in which to create the - restored table. This instance must be in the same project as - the source backup. Values are of the form + restored table. Values are of the form ``projects//instances/``. table_id (str): Required. The id of the table to create and restore to. This @@ -359,7 +360,7 @@ class ListTablesRequest(proto.Message): should be listed. Values are of the form ``projects/{project}/instances/{instance}``. view (google.cloud.bigtable_admin_v2.types.Table.View): - The view to be applied to the returned tables' fields. Only + The view to be applied to the returned tables' fields. NAME_ONLY view (default) and REPLICATION_VIEW are supported. page_size (int): Maximum number of results per page. @@ -1192,8 +1193,15 @@ class ListBackupsRequest(proto.Message): fields in [Backup][google.bigtable.admin.v2.Backup]. The full syntax is described at https://aip.dev/132#ordering. - Fields supported are: \* name \* source_table \* expire_time - \* start_time \* end_time \* size_bytes \* state + Fields supported are: + + - name + - source_table + - expire_time + - start_time + - end_time + - size_bytes + - state For example, "start_time". The default sorting order is ascending. To specify descending order for the field, a @@ -1266,4 +1274,90 @@ def raw_page(self): ) +class CopyBackupRequest(proto.Message): + r"""The request for + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + + Attributes: + parent (str): + Required. The name of the destination cluster that will + contain the backup copy. The cluster must already exists. + Values are of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}``. + backup_id (str): + Required. The id of the new backup. The ``backup_id`` along + with ``parent`` are combined as {parent}/backups/{backup_id} + to create the full backup name, of the form: + ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. + This string must be between 1 and 50 characters in length + and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. + source_backup (str): + Required. 
The source backup to be copied from. The source + backup needs to be in READY state for it to be copied. + Copying a copied backup is not allowed. Once CopyBackup is + in progress, the source backup cannot be deleted or cleaned + up on expiration until CopyBackup is finished. Values are of + the form: + ``projects//instances//clusters//backups/``. + expire_time (google.protobuf.timestamp_pb2.Timestamp): + Required. Required. The expiration time of the copied backup + with microsecond granularity that must be at least 6 hours + and at most 30 days from the time the request is received. + Once the ``expire_time`` has passed, Cloud Bigtable will + delete the backup and free the resources used by the backup. + """ + + parent: str = proto.Field( + proto.STRING, + number=1, + ) + backup_id: str = proto.Field( + proto.STRING, + number=2, + ) + source_backup: str = proto.Field( + proto.STRING, + number=3, + ) + expire_time: timestamp_pb2.Timestamp = proto.Field( + proto.MESSAGE, + number=4, + message=timestamp_pb2.Timestamp, + ) + + +class CopyBackupMetadata(proto.Message): + r"""Metadata type for the google.longrunning.Operation returned by + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. + + Attributes: + name (str): + The name of the backup being created through the copy + operation. Values are of the form + ``projects//instances//clusters//backups/``. + source_backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): + Information about the source backup that is + being copied from. + progress (google.cloud.bigtable_admin_v2.types.OperationProgress): + The progress of the + [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup] + operation. + """ + + name: str = proto.Field( + proto.STRING, + number=1, + ) + source_backup_info: gba_table.BackupInfo = proto.Field( + proto.MESSAGE, + number=2, + message=gba_table.BackupInfo, + ) + progress: common.OperationProgress = proto.Field( + proto.MESSAGE, + number=3, + message=common.OperationProgress, + ) + + __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/google/cloud/bigtable_admin_v2/types/table.py b/google/cloud/bigtable_admin_v2/types/table.py index e75ac00bb..6b885203d 100644 --- a/google/cloud/bigtable_admin_v2/types/table.py +++ b/google/cloud/bigtable_admin_v2/types/table.py @@ -124,7 +124,8 @@ class Table(proto.Message): ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL`` column_families (MutableMapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]): The column families configured for this table, mapped by - column family ID. Views: ``SCHEMA_VIEW``, ``FULL`` + column family ID. Views: ``SCHEMA_VIEW``, ``STATS_VIEW``, + ``FULL`` granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity): Immutable. The granularity (i.e. ``MILLIS``) at which timestamps are stored in this table. Timestamps not matching @@ -141,15 +142,16 @@ class Table(proto.Message): this table. Otherwise, the change stream is disabled and the change stream is not retained. deletion_protection (bool): - Set to true to make the table protected - against data loss. i.e. deleting the following - resources through Admin APIs are prohibited: - - - The table. - - The column families in the table. - - The instance containing the table. - Note one can still delete the data stored in the - table through Data APIs. + Set to true to make the table protected against data loss. + i.e. deleting the following resources through Admin APIs are + prohibited: + + - The table. + - The column families in the table. 
+ - The instance containing the table. + + Note one can still delete the data stored in the table + through Data APIs. """ class TimestampGranularity(proto.Enum): @@ -487,8 +489,7 @@ class Snapshot(proto.Message): Attributes: name (str): - Output only. The unique name of the snapshot. Values are of - the form + The unique name of the snapshot. Values are of the form ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. source_table (google.cloud.bigtable_admin_v2.types.Table): Output only. The source table at the time the @@ -503,16 +504,15 @@ class Snapshot(proto.Message): Output only. The time when the snapshot is created. delete_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when the snapshot will - be deleted. The maximum amount of time a - snapshot can stay active is 365 days. If 'ttl' - is not specified, the default maximum of 365 - days will be used. + The time when the snapshot will be deleted. + The maximum amount of time a snapshot can stay + active is 365 days. If 'ttl' is not specified, + the default maximum of 365 days will be used. state (google.cloud.bigtable_admin_v2.types.Snapshot.State): Output only. The current state of the snapshot. description (str): - Output only. Description of the snapshot. + Description of the snapshot. """ class State(proto.Enum): @@ -588,6 +588,12 @@ class Backup(proto.Message): backup was created. This needs to be in the same instance as the backup. Values are of the form ``projects/{project}/instances/{instance}/tables/{source_table}``. + source_backup (str): + Output only. Name of the backup from which + this backup was copied. If a backup is not + created by copying a backup, this field will be + empty. Values are of the form: + projects//instances//backups/. expire_time (google.protobuf.timestamp_pb2.Timestamp): Required. The expiration time of the backup, with microseconds granularity that must be at least 6 hours and @@ -637,6 +643,10 @@ class State(proto.Enum): proto.STRING, number=2, ) + source_backup: str = proto.Field( + proto.STRING, + number=10, + ) expire_time: timestamp_pb2.Timestamp = proto.Field( proto.MESSAGE, number=3, @@ -685,6 +695,12 @@ class BackupInfo(proto.Message): source_table (str): Output only. Name of the table the backup was created from. + source_backup (str): + Output only. Name of the backup from which + this backup was copied. If a backup is not + created by copying a backup, this field will be + empty. Values are of the form: + projects//instances//backups/. 
""" backup: str = proto.Field( @@ -705,6 +721,10 @@ class BackupInfo(proto.Message): proto.STRING, number=4, ) + source_backup: str = proto.Field( + proto.STRING, + number=10, + ) __all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/scripts/fixup_bigtable_admin_v2_keywords.py b/scripts/fixup_bigtable_admin_v2_keywords.py index 58eab8bcf..6882feaf6 100644 --- a/scripts/fixup_bigtable_admin_v2_keywords.py +++ b/scripts/fixup_bigtable_admin_v2_keywords.py @@ -40,6 +40,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer): CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { 'check_consistency': ('name', 'consistency_token', ), + 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ), 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), 'create_backup': ('parent', 'backup_id', 'backup', ), 'create_cluster': ('parent', 'cluster_id', 'cluster', ), diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 0a0b3b671..a537cb9f0 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -4960,6 +4960,7 @@ def test_get_backup(request_type, transport: str = "grpc"): call.return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -4974,6 +4975,7 @@ def test_get_backup(request_type, transport: str = "grpc"): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -5014,6 +5016,7 @@ async def test_get_backup_async( table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -5029,6 +5032,7 @@ async def test_get_backup_async( assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -5200,6 +5204,7 @@ def test_update_backup(request_type, transport: str = "grpc"): call.return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -5214,6 +5219,7 @@ def test_update_backup(request_type, transport: str = "grpc"): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -5255,6 +5261,7 @@ async def test_update_backup_async( table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -5270,6 +5277,7 @@ async def test_update_backup_async( assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" 
assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -6217,6 +6225,262 @@ async def test_restore_table_field_headers_async(): ) in kw["metadata"] +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup(request_type, transport: str = "grpc"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/spam") + response = client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +def test_copy_backup_empty_call(): + # This test is a coverage failsafe to make sure that totally empty calls, + # i.e. request == None and no flattened fields passed, work. + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="grpc", + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + client.copy_backup() + call.assert_called() + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + +@pytest.mark.asyncio +async def test_copy_backup_async( + transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest +): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Everything is optional in proto3 as far as the runtime is concerned, + # and we are mocking out the actual API, so just send an empty request. + request = request_type() + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + response = await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == bigtable_table_admin.CopyBackupRequest() + + # Establish that the response is the type that we expect. + assert isinstance(response, future.Future) + + +@pytest.mark.asyncio +async def test_copy_backup_async_from_dict(): + await test_copy_backup_async(request_type=dict) + + +def test_copy_backup_field_headers(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. 
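+    # The assertions below verify that request.parent is echoed into the
+    # x-goog-request-params metadata header, which is how the routing
+    # information for CopyBackup reaches the server.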
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = operations_pb2.Operation(name="operations/op") + client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +@pytest.mark.asyncio +async def test_copy_backup_field_headers_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Any value that is part of the HTTP/1.1 URI should be sent as + # a field header. Set these to a non-empty value. + request = bigtable_table_admin.CopyBackupRequest() + + request.parent = "parent_value" + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/op") + ) + await client.copy_backup(request) + + # Establish that the underlying gRPC stub method was called. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + assert args[0] == request + + # Establish that the field header was sent. + _, _, kw = call.mock_calls[0] + assert ( + "x-goog-request-params", + "parent=parent_value", + ) in kw["metadata"] + + +def test_copy_backup_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. + with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + client.copy_backup( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) == 1 + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].source_backup + mock_val = "source_backup_value" + assert arg == mock_val + assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( + seconds=751 + ) + + +def test_copy_backup_flattened_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +@pytest.mark.asyncio +async def test_copy_backup_flattened_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Mock the actual call within the gRPC stub, and fake the request. 
+ with mock.patch.object(type(client.transport.copy_backup), "__call__") as call: + # Designate an appropriate return value for the call. + call.return_value = operations_pb2.Operation(name="operations/op") + + call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( + operations_pb2.Operation(name="operations/spam") + ) + # Call the method with a truthy value for each flattened field, + # using the keyword arguments to the method. + response = await client.copy_backup( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(call.mock_calls) + _, args, _ = call.mock_calls[0] + arg = args[0].parent + mock_val = "parent_value" + assert arg == mock_val + arg = args[0].backup_id + mock_val = "backup_id_value" + assert arg == mock_val + arg = args[0].source_backup + mock_val = "source_backup_value" + assert arg == mock_val + assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp( + seconds=751 + ) + + +@pytest.mark.asyncio +async def test_copy_backup_flattened_error_async(): + client = BigtableTableAdminAsyncClient( + credentials=ga_credentials.AnonymousCredentials(), + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + await client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + @pytest.mark.parametrize( "request_type", [ @@ -8206,6 +8470,7 @@ def test_update_table_rest(request_type): "start_time": {"seconds": 751, "nanos": 543}, "end_time": {}, "source_table": "source_table_value", + "source_backup": "source_backup_value", }, }, "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, @@ -8404,6 +8669,7 @@ def test_update_table_rest_bad_request( "start_time": {"seconds": 751, "nanos": 543}, "end_time": {}, "source_table": "source_table_value", + "source_backup": "source_backup_value", }, }, "change_stream_config": {"retention_period": {"seconds": 751, "nanos": 543}}, @@ -11246,6 +11512,7 @@ def test_create_backup_rest(request_type): request_init["backup"] = { "name": "name_value", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -11467,6 +11734,7 @@ def test_create_backup_rest_bad_request( request_init["backup"] = { "name": "name_value", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -11593,6 +11861,7 @@ def test_get_backup_rest(request_type): return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -11611,6 +11880,7 @@ def test_get_backup_rest(request_type): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -11868,6 +12138,7 @@ def test_update_backup_rest(request_type): request_init["backup"] = { "name": 
"projects/sample1/instances/sample2/clusters/sample3/backups/sample4", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -11896,6 +12167,7 @@ def test_update_backup_rest(request_type): return_value = table.Backup( name="name_value", source_table="source_table_value", + source_backup="source_backup_value", size_bytes=1089, state=table.Backup.State.CREATING, ) @@ -11914,6 +12186,7 @@ def test_update_backup_rest(request_type): assert isinstance(response, table.Backup) assert response.name == "name_value" assert response.source_table == "source_table_value" + assert response.source_backup == "source_backup_value" assert response.size_bytes == 1089 assert response.state == table.Backup.State.CREATING @@ -12082,6 +12355,7 @@ def test_update_backup_rest_bad_request( request_init["backup"] = { "name": "projects/sample1/instances/sample2/clusters/sample3/backups/sample4", "source_table": "source_table_value", + "source_backup": "source_backup_value", "expire_time": {"seconds": 751, "nanos": 543}, "start_time": {}, "end_time": {}, @@ -13012,6 +13286,296 @@ def test_restore_table_rest_error(): ) +@pytest.mark.parametrize( + "request_type", + [ + bigtable_table_admin.CopyBackupRequest, + dict, + ], +) +def test_copy_backup_rest(request_type): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + response = client.copy_backup(request) + + # Establish that the response is the type that we expect. 
+ assert response.operation.name == "operations/spam" + + +def test_copy_backup_rest_required_fields( + request_type=bigtable_table_admin.CopyBackupRequest, +): + transport_class = transports.BigtableTableAdminRestTransport + + request_init = {} + request_init["parent"] = "" + request_init["backup_id"] = "" + request_init["source_backup"] = "" + request = request_type(**request_init) + pb_request = request_type.pb(request) + jsonified_request = json.loads( + json_format.MessageToJson( + pb_request, + including_default_value_fields=False, + use_integers_for_enums=False, + ) + ) + + # verify fields with default values are dropped + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with default values are now present + + jsonified_request["parent"] = "parent_value" + jsonified_request["backupId"] = "backup_id_value" + jsonified_request["sourceBackup"] = "source_backup_value" + + unset_fields = transport_class( + credentials=ga_credentials.AnonymousCredentials() + ).copy_backup._get_unset_required_fields(jsonified_request) + jsonified_request.update(unset_fields) + + # verify required fields with non-default values are left alone + assert "parent" in jsonified_request + assert jsonified_request["parent"] == "parent_value" + assert "backupId" in jsonified_request + assert jsonified_request["backupId"] == "backup_id_value" + assert "sourceBackup" in jsonified_request + assert jsonified_request["sourceBackup"] == "source_backup_value" + + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + request = request_type(**request_init) + + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + # Mock the http request call within the method and fake a response. + with mock.patch.object(Session, "request") as req: + # We need to mock transcode() because providing default values + # for required fields will fail the real version if the http_options + # expect actual values for those fields. + with mock.patch.object(path_template, "transcode") as transcode: + # A uri without fields and an empty body will force all the + # request fields to show up in the query_params. 
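+            # (transcode() would normally match the request against the method's
+            # http_options to build the URI; stubbing it keeps this test
+            # independent of the real URL template.)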
+ pb_request = request_type.pb(request) + transcode_result = { + "uri": "v1/sample_method", + "method": "post", + "query_params": pb_request, + } + transcode_result["body"] = pb_request + transcode.return_value = transcode_result + + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + response = client.copy_backup(request) + + expected_params = [("$alt", "json;enum-encoding=int")] + actual_params = req.call_args.kwargs["params"] + assert expected_params == actual_params + + +def test_copy_backup_rest_unset_required_fields(): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials + ) + + unset_fields = transport.copy_backup._get_unset_required_fields({}) + assert set(unset_fields) == ( + set(()) + & set( + ( + "parent", + "backupId", + "sourceBackup", + "expireTime", + ) + ) + ) + + +@pytest.mark.parametrize("null_interceptor", [True, False]) +def test_copy_backup_rest_interceptors(null_interceptor): + transport = transports.BigtableTableAdminRestTransport( + credentials=ga_credentials.AnonymousCredentials(), + interceptor=None + if null_interceptor + else transports.BigtableTableAdminRestInterceptor(), + ) + client = BigtableTableAdminClient(transport=transport) + with mock.patch.object( + type(client.transport._session), "request" + ) as req, mock.patch.object( + path_template, "transcode" + ) as transcode, mock.patch.object( + operation.Operation, "_set_result_from_operation" + ), mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "post_copy_backup" + ) as post, mock.patch.object( + transports.BigtableTableAdminRestInterceptor, "pre_copy_backup" + ) as pre: + pre.assert_not_called() + post.assert_not_called() + pb_message = bigtable_table_admin.CopyBackupRequest.pb( + bigtable_table_admin.CopyBackupRequest() + ) + transcode.return_value = { + "method": "post", + "uri": "my_uri", + "body": pb_message, + "query_params": pb_message, + } + + req.return_value = Response() + req.return_value.status_code = 200 + req.return_value.request = PreparedRequest() + req.return_value._content = json_format.MessageToJson( + operations_pb2.Operation() + ) + + request = bigtable_table_admin.CopyBackupRequest() + metadata = [ + ("key", "val"), + ("cephalopod", "squid"), + ] + pre.return_value = request, metadata + post.return_value = operations_pb2.Operation() + + client.copy_backup( + request, + metadata=[ + ("key", "val"), + ("cephalopod", "squid"), + ], + ) + + pre.assert_called_once() + post.assert_called_once() + + +def test_copy_backup_rest_bad_request( + transport: str = "rest", request_type=bigtable_table_admin.CopyBackupRequest +): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # send a request that will satisfy transcoding + request_init = {"parent": "projects/sample1/instances/sample2/clusters/sample3"} + request = request_type(**request_init) + + # Mock the http request call within the method and fake a BadRequest error. 
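+    # A 400 status on the mocked session is translated by
+    # core_exceptions.from_http_response into the BadRequest raised here.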
+ with mock.patch.object(Session, "request") as req, pytest.raises( + core_exceptions.BadRequest + ): + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 400 + response_value.request = Request() + req.return_value = response_value + client.copy_backup(request) + + +def test_copy_backup_rest_flattened(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport="rest", + ) + + # Mock the http request call within the method and fake a response. + with mock.patch.object(type(client.transport._session), "request") as req: + # Designate an appropriate value for the returned response. + return_value = operations_pb2.Operation(name="operations/spam") + + # get arguments that satisfy an http rule for this method + sample_request = { + "parent": "projects/sample1/instances/sample2/clusters/sample3" + } + + # get truthy value for each flattened field + mock_args = dict( + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + mock_args.update(sample_request) + + # Wrap the value into a proper Response obj + response_value = Response() + response_value.status_code = 200 + json_return_value = json_format.MessageToJson(return_value) + response_value._content = json_return_value.encode("UTF-8") + req.return_value = response_value + + client.copy_backup(**mock_args) + + # Establish that the underlying call was made with the expected + # request object values. + assert len(req.mock_calls) == 1 + _, args, _ = req.mock_calls[0] + assert path_template.validate( + "%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" + % client.transport._host, + args[1], + ) + + +def test_copy_backup_rest_flattened_error(transport: str = "rest"): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), + transport=transport, + ) + + # Attempting to call a method with both a request object and flattened + # fields is an error. + with pytest.raises(ValueError): + client.copy_backup( + bigtable_table_admin.CopyBackupRequest(), + parent="parent_value", + backup_id="backup_id_value", + source_backup="source_backup_value", + expire_time=timestamp_pb2.Timestamp(seconds=751), + ) + + +def test_copy_backup_rest_error(): + client = BigtableTableAdminClient( + credentials=ga_credentials.AnonymousCredentials(), transport="rest" + ) + + @pytest.mark.parametrize( "request_type", [ @@ -14001,6 +14565,7 @@ def test_bigtable_table_admin_base_transport(): "delete_backup", "list_backups", "restore_table", + "copy_backup", "get_iam_policy", "set_iam_policy", "test_iam_permissions", @@ -14377,6 +14942,9 @@ def test_bigtable_table_admin_client_transport_session_collision(transport_name) session1 = client1.transport.restore_table._session session2 = client2.transport.restore_table._session assert session1 != session2 + session1 = client1.transport.copy_backup._session + session2 = client2.transport.copy_backup._session + assert session1 != session2 session1 = client1.transport.get_iam_policy._session session2 = client2.transport.get_iam_policy._session assert session1 != session2
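For readers evaluating the new surface, a minimal usage sketch (not part of the patch): it copies an existing backup into a second cluster and blocks on the long-running operation. The project, instance, cluster, and backup IDs are hypothetical placeholders.

import datetime

from google.cloud import bigtable_admin_v2
from google.protobuf import timestamp_pb2

# Copy a backup into another cluster with the CopyBackup API added above.
client = bigtable_admin_v2.BigtableTableAdminClient()

# The copy's expiration must be at least 6 hours and at most 30 days
# from the time the request is received; 7 days is used here.
expire_time = timestamp_pb2.Timestamp()
expire_time.FromDatetime(
    datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=7)
)

operation = client.copy_backup(
    parent="projects/my-project/instances/my-instance/clusters/my-cluster-2",
    backup_id="my-backup-copy",
    source_backup=(
        "projects/my-project/instances/my-instance"
        "/clusters/my-cluster-1/backups/my-backup"
    ),
    expire_time=expire_time,
)

# Block until the LRO resolves to the new Backup resource.
backup = operation.result()
print(backup.name, backup.state)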