diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index b6e77aaea..18e125047 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging as std_logging from collections import OrderedDict import re from typing import ( @@ -58,6 +59,15 @@ from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport from .client import BigtableInstanceAdminClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class BigtableInstanceAdminAsyncClient: """Service for creating, configuring, and deleting Cloud @@ -289,6 +299,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableInstanceAdminAsyncClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "credentialsType": None, + }, + ) + async def create_instance( self, request: Optional[ @@ -301,7 +333,7 @@ async def create_instance( clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Create an instance within a project. @@ -352,8 +384,10 @@ async def create_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -436,7 +470,7 @@ async def get_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Gets information about an instance. @@ -455,8 +489,10 @@ async def get_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -522,7 +558,7 @@ async def list_instances( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. @@ -541,8 +577,10 @@ async def list_instances( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListInstancesResponse: @@ -602,7 +640,7 @@ async def update_instance( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Updates an instance within a project. This method updates only the display name and type for an Instance. @@ -620,8 +658,10 @@ async def update_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -674,7 +714,7 @@ async def partial_update_instance( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates an instance within a project. This method can modify all fields of an Instance and is the @@ -702,8 +742,10 @@ async def partial_update_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -785,7 +827,7 @@ async def delete_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Delete an instance from a project. @@ -804,8 +846,10 @@ async def delete_instance( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -861,7 +905,7 @@ async def create_cluster( cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a cluster within an instance. @@ -902,8 +946,10 @@ async def create_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -982,7 +1028,7 @@ async def get_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. @@ -1001,8 +1047,10 @@ async def get_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.bigtable_admin_v2.types.Cluster: @@ -1067,7 +1115,7 @@ async def list_clusters( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. @@ -1088,8 +1136,10 @@ async def list_clusters( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListClustersResponse: @@ -1149,7 +1199,7 @@ async def update_cluster( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates a cluster within an instance. @@ -1166,8 +1216,10 @@ async def update_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1229,7 +1281,7 @@ async def partial_update_cluster( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Partially updates a cluster within a project. This method is the preferred way to update a Cluster. @@ -1267,8 +1319,10 @@ async def partial_update_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1347,7 +1401,7 @@ async def delete_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a cluster from an instance. 
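Note: the widened `metadata` annotation above (`Sequence[Tuple[str, Union[str, bytes]]]`) means callers may now mix string- and bytes-valued metadata entries on any of these methods. A minimal sketch of a call site under that rule; the project/instance path and the custom header names are hypothetical, not taken from this diff:

    from google.cloud import bigtable_admin_v2

    client = bigtable_admin_v2.BigtableInstanceAdminClient()

    # Per gRPC convention, only keys ending in "-bin" may (and must) carry bytes values;
    # every other metadata value must be a str.
    instance = client.get_instance(
        name="projects/my-project/instances/my-instance",     # hypothetical resource name
        metadata=[
            ("x-example-routing-header", "profile=default"),  # str value for a normal key
            ("x-example-trace-bin", b"\x00\x01\x02"),         # bytes value for a "-bin" key
        ],
    )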
@@ -1366,8 +1420,10 @@ async def delete_cluster( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1423,7 +1479,7 @@ async def create_app_profile( app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. @@ -1458,8 +1514,10 @@ async def create_app_profile( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1527,7 +1585,7 @@ async def get_app_profile( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. @@ -1546,8 +1604,10 @@ async def get_app_profile( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1611,7 +1671,7 @@ async def list_app_profiles( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAppProfilesAsyncPager: r"""Lists information about app profiles in an instance. @@ -1633,8 +1693,10 @@ async def list_app_profiles( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager: @@ -1712,7 +1774,7 @@ async def update_app_profile( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates an app profile within an instance. @@ -1738,8 +1800,10 @@ async def update_app_profile( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1816,7 +1880,7 @@ async def delete_app_profile( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an app profile from an instance. @@ -1835,8 +1899,10 @@ async def delete_app_profile( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1888,7 +1954,7 @@ async def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists @@ -1909,8 +1975,10 @@ async def get_iam_policy( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.iam.v1.policy_pb2.Policy: @@ -1996,7 +2064,7 @@ async def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. @@ -2016,8 +2084,10 @@ async def set_iam_policy( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2104,7 +2174,7 @@ async def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. @@ -2133,8 +2203,10 @@ async def test_iam_permissions( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -2194,7 +2266,7 @@ async def list_hot_tablets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListHotTabletsAsyncPager: r"""Lists hot tablets in a cluster, within the time range provided. Hot tablets are ordered based on CPU usage. @@ -2214,8 +2286,10 @@ async def list_hot_tablets( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsAsyncPager: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index b717eac8b..db6c9f361 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -14,6 +14,7 @@ # limitations under the License. # from collections import OrderedDict +import logging as std_logging import os import re from typing import ( @@ -48,6 +49,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers @@ -707,6 +717,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -773,6 +787,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableInstanceAdminClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "credentialsType": None, + }, + ) + def create_instance( self, request: Optional[ @@ -785,7 +822,7 @@ def create_instance( clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Create an instance within a project. @@ -836,8 +873,10 @@ def create_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation.Operation: @@ -916,7 +955,7 @@ def get_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Gets information about an instance. @@ -935,8 +974,10 @@ def get_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -999,7 +1040,7 @@ def list_instances( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Lists information about instances in a project. @@ -1018,8 +1059,10 @@ def list_instances( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListInstancesResponse: @@ -1076,7 +1119,7 @@ def update_instance( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Updates an instance within a project. This method updates only the display name and type for an Instance. @@ -1094,8 +1137,10 @@ def update_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Instance: @@ -1146,7 +1191,7 @@ def partial_update_instance( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Partially updates an instance within a project. 
This method can modify all fields of an Instance and is the @@ -1174,8 +1219,10 @@ def partial_update_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1254,7 +1301,7 @@ def delete_instance( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Delete an instance from a project. @@ -1273,8 +1320,10 @@ def delete_instance( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1327,7 +1376,7 @@ def create_cluster( cluster: Optional[instance.Cluster] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a cluster within an instance. @@ -1368,8 +1417,10 @@ def create_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1445,7 +1496,7 @@ def get_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Cluster: r"""Gets information about a cluster. @@ -1464,8 +1515,10 @@ def get_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Cluster: @@ -1527,7 +1580,7 @@ def list_clusters( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Lists information about clusters in an instance. @@ -1548,8 +1601,10 @@ def list_clusters( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.ListClustersResponse: @@ -1606,7 +1661,7 @@ def update_cluster( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates a cluster within an instance. @@ -1623,8 +1678,10 @@ def update_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1684,7 +1741,7 @@ def partial_update_cluster( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Partially updates a cluster within a project. This method is the preferred way to update a Cluster. @@ -1722,8 +1779,10 @@ def partial_update_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1799,7 +1858,7 @@ def delete_cluster( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a cluster from an instance. 
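Note: the client-creation DEBUG records added above are only emitted when `google.api_core.client_logging` is importable (`CLIENT_LOGGING_SUPPORTED`) and DEBUG is enabled for this package's loggers. A rough sketch of surfacing them with the standard library; the exact opt-in mechanism inside `client_logging.initialize_logging()` (for example an environment variable) is an assumption here and may vary with the installed google-api-core version:

    import logging

    from google.cloud import bigtable_admin_v2

    # Assumption: raising the package logger (or the root logger) to DEBUG is enough
    # for _LOGGER.isEnabledFor(logging.DEBUG) to be true in the generated clients.
    logging.basicConfig(level=logging.DEBUG)
    logging.getLogger("google.cloud.bigtable_admin_v2").setLevel(logging.DEBUG)

    # Creating a client now logs serviceName, universeDomain and credentials type at DEBUG.
    client = bigtable_admin_v2.BigtableInstanceAdminClient()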
@@ -1818,8 +1877,10 @@ def delete_cluster( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1872,7 +1933,7 @@ def create_app_profile( app_profile: Optional[instance.AppProfile] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Creates an app profile within an instance. @@ -1907,8 +1968,10 @@ def create_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -1973,7 +2036,7 @@ def get_app_profile( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Gets information about an app profile. @@ -1992,8 +2055,10 @@ def get_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AppProfile: @@ -2054,7 +2119,7 @@ def list_app_profiles( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAppProfilesPager: r"""Lists information about app profiles in an instance. @@ -2076,8 +2141,10 @@ def list_app_profiles( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager: @@ -2152,7 +2219,7 @@ def update_app_profile( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates an app profile within an instance. @@ -2178,8 +2245,10 @@ def update_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2253,7 +2322,7 @@ def delete_app_profile( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes an app profile from an instance. @@ -2272,8 +2341,10 @@ def delete_app_profile( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2322,7 +2393,7 @@ def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for an instance resource. Returns an empty policy if an instance exists @@ -2343,8 +2414,10 @@ def get_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.iam.v1.policy_pb2.Policy: @@ -2431,7 +2504,7 @@ def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on an instance resource. Replaces any existing policy. @@ -2451,8 +2524,10 @@ def set_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -2540,7 +2615,7 @@ def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified instance resource. @@ -2569,8 +2644,10 @@ def test_iam_permissions( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: @@ -2631,7 +2708,7 @@ def list_hot_tablets( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListHotTabletsPager: r"""Lists hot tablets in a cluster, within the time range provided. Hot tablets are ordered based on CPU usage. @@ -2651,8 +2728,10 @@ def list_hot_tablets( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py index bb7ee001f..98a8a5afb 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py @@ -67,7 +67,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -81,8 +81,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) @@ -143,7 +145,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -157,8 +159,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListAppProfilesRequest(request) @@ -223,7 +227,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -237,8 +241,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) @@ -299,7 +305,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. 
@@ -313,8 +319,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_instance_admin.ListHotTabletsRequest(request) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index cc3e70986..40f744e60 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -22,8 +25,11 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin from google.cloud.bigtable_admin_v2.types import instance @@ -33,6 +39,81 @@ from google.protobuf import empty_pb2 # type: ignore from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) 
for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableInstanceAdminGrpcTransport(BigtableInstanceAdminTransport): """gRPC backend transport for BigtableInstanceAdmin. @@ -190,7 +271,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -254,7 +340,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -286,7 +374,7 @@ def create_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self.grpc_channel.unary_unary( + self._stubs["create_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -312,7 +400,7 @@ def get_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self.grpc_channel.unary_unary( + self._stubs["get_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, response_deserializer=instance.Instance.deserialize, @@ -341,7 +429,7 @@ def list_instances( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self.grpc_channel.unary_unary( + self._stubs["list_instances"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, @@ -368,7 +456,7 @@ def update_instance(self) -> Callable[[instance.Instance], instance.Instance]: # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self.grpc_channel.unary_unary( + self._stubs["update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", request_serializer=instance.Instance.serialize, response_deserializer=instance.Instance.deserialize, @@ -398,7 +486,7 @@ def partial_update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_instance" not in self._stubs: - self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -424,7 +512,7 @@ def delete_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -458,7 +546,7 @@ def create_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_cluster" not in self._stubs: - self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + self._stubs["create_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -484,7 +572,7 @@ def get_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_cluster" not in self._stubs: - self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + self._stubs["get_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, response_deserializer=instance.Cluster.deserialize, @@ -513,7 +601,7 @@ def list_clusters( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_clusters" not in self._stubs: - self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + self._stubs["list_clusters"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, @@ -541,7 +629,7 @@ def update_cluster(self) -> Callable[[instance.Cluster], operations_pb2.Operatio # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_cluster" not in self._stubs: - self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", request_serializer=instance.Cluster.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -582,7 +670,7 @@ def partial_update_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_cluster" not in self._stubs: - self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -608,7 +696,7 @@ def delete_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_cluster" not in self._stubs: - self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + self._stubs["delete_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -636,7 +724,7 @@ def create_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_app_profile" not in self._stubs: - self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["create_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -662,7 +750,7 @@ def get_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_app_profile" not in self._stubs: - self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["get_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -691,7 +779,7 @@ def list_app_profiles( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_app_profiles" not in self._stubs: - self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + self._stubs["list_app_profiles"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, @@ -719,7 +807,7 @@ def update_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_app_profile" not in self._stubs: - self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["update_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -745,7 +833,7 @@ def delete_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_app_profile" not in self._stubs: - self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["delete_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -773,7 +861,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -800,7 +888,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -830,7 +918,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -860,7 +948,7 @@ def list_hot_tablets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_hot_tablets" not in self._stubs: - self._stubs["list_hot_tablets"] = self.grpc_channel.unary_unary( + self._stubs["list_hot_tablets"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, @@ -868,7 +956,7 @@ def list_hot_tablets( return self._stubs["list_hot_tablets"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 716e14a86..dc51a5b07 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. # import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -24,8 +27,11 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin @@ -37,6 +43,82 @@ from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableInstanceAdminGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if 
response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport): """gRPC AsyncIO backend transport for BigtableInstanceAdmin. @@ -237,10 +319,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -263,7 +348,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -297,7 +382,7 @@ def create_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_instance" not in self._stubs: - self._stubs["create_instance"] = self.grpc_channel.unary_unary( + self._stubs["create_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance", request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -325,7 +410,7 @@ def get_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_instance" not in self._stubs: - self._stubs["get_instance"] = self.grpc_channel.unary_unary( + self._stubs["get_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance", request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, response_deserializer=instance.Instance.deserialize, @@ -354,7 +439,7 @@ def list_instances( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_instances" not in self._stubs: - self._stubs["list_instances"] = self.grpc_channel.unary_unary( + self._stubs["list_instances"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances", request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, @@ -383,7 +468,7 @@ def update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
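The asyncio variant attaches its interceptor to a channel that already exists by appending to the channel's private `_unary_unary_interceptors` list. For comparison, the public path is to pass interceptors when the aio channel is created; a generic sketch with placeholder names, assuming a current grpcio release:

# Generic asyncio interceptor sketch (illustration only; not part of this diff).
from grpc import aio


class LoggingAioInterceptor(aio.UnaryUnaryClientInterceptor):
    """Minimal asyncio interceptor: print the method, then delegate."""

    async def intercept_unary_unary(self, continuation, client_call_details, request):
        print(f"Calling {client_call_details.method}")
        # The awaited continuation yields the call object; returning it leaves
        # response handling to the caller, as the transport interceptor does.
        return await continuation(client_call_details, request)


# Interceptors can be supplied at channel creation instead of being appended
# to an already-built channel's interceptor list.
channel = aio.insecure_channel(
    "localhost:50051",  # placeholder target
    interceptors=[LoggingAioInterceptor()],
)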
if "update_instance" not in self._stubs: - self._stubs["update_instance"] = self.grpc_channel.unary_unary( + self._stubs["update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance", request_serializer=instance.Instance.serialize, response_deserializer=instance.Instance.deserialize, @@ -414,7 +499,7 @@ def partial_update_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_instance" not in self._stubs: - self._stubs["partial_update_instance"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance", request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -442,7 +527,7 @@ def delete_instance( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_instance" not in self._stubs: - self._stubs["delete_instance"] = self.grpc_channel.unary_unary( + self._stubs["delete_instance"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance", request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -477,7 +562,7 @@ def create_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_cluster" not in self._stubs: - self._stubs["create_cluster"] = self.grpc_channel.unary_unary( + self._stubs["create_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster", request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -505,7 +590,7 @@ def get_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_cluster" not in self._stubs: - self._stubs["get_cluster"] = self.grpc_channel.unary_unary( + self._stubs["get_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster", request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, response_deserializer=instance.Cluster.deserialize, @@ -534,7 +619,7 @@ def list_clusters( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_clusters" not in self._stubs: - self._stubs["list_clusters"] = self.grpc_channel.unary_unary( + self._stubs["list_clusters"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters", request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, @@ -564,7 +649,7 @@ def update_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_cluster" not in self._stubs: - self._stubs["update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster", request_serializer=instance.Cluster.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -606,7 +691,7 @@ def partial_update_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "partial_update_cluster" not in self._stubs: - self._stubs["partial_update_cluster"] = self.grpc_channel.unary_unary( + self._stubs["partial_update_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster", request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -634,7 +719,7 @@ def delete_cluster( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_cluster" not in self._stubs: - self._stubs["delete_cluster"] = self.grpc_channel.unary_unary( + self._stubs["delete_cluster"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster", request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -663,7 +748,7 @@ def create_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_app_profile" not in self._stubs: - self._stubs["create_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["create_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile", request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -691,7 +776,7 @@ def get_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_app_profile" not in self._stubs: - self._stubs["get_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["get_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile", request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, response_deserializer=instance.AppProfile.deserialize, @@ -720,7 +805,7 @@ def list_app_profiles( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_app_profiles" not in self._stubs: - self._stubs["list_app_profiles"] = self.grpc_channel.unary_unary( + self._stubs["list_app_profiles"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles", request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, @@ -749,7 +834,7 @@ def update_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_app_profile" not in self._stubs: - self._stubs["update_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["update_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile", request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -777,7 +862,7 @@ def delete_app_profile( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_app_profile" not in self._stubs: - self._stubs["delete_app_profile"] = self.grpc_channel.unary_unary( + self._stubs["delete_app_profile"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile", request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -805,7 +890,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -832,7 +917,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -862,7 +947,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -892,7 +977,7 @@ def list_hot_tablets( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "list_hot_tablets" not in self._stubs: - self._stubs["list_hot_tablets"] = self.grpc_channel.unary_unary( + self._stubs["list_hot_tablets"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets", request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, @@ -1145,7 +1230,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 45f08fa64..dbd7d604b 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries @@ -48,6 +49,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -236,9 +245,10 @@ def post_update_instance(self, response): def pre_create_app_profile( self, request: bigtable_instance_admin.CreateAppProfileRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.CreateAppProfileRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.CreateAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_app_profile @@ -261,8 +271,11 @@ def post_create_app_profile( def pre_create_cluster( self, request: bigtable_instance_admin.CreateClusterRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.CreateClusterRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.CreateClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for create_cluster Override in a subclass to manipulate the request or metadata @@ -284,9 +297,10 @@ def post_create_cluster( def pre_create_instance( self, request: bigtable_instance_admin.CreateInstanceRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.CreateInstanceRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.CreateInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_instance @@ -309,9 +323,10 @@ def post_create_instance( def pre_delete_app_profile( self, request: 
bigtable_instance_admin.DeleteAppProfileRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.DeleteAppProfileRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.DeleteAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_app_profile @@ -323,8 +338,11 @@ def pre_delete_app_profile( def pre_delete_cluster( self, request: bigtable_instance_admin.DeleteClusterRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.DeleteClusterRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.DeleteClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_cluster Override in a subclass to manipulate the request or metadata @@ -335,9 +353,10 @@ def pre_delete_cluster( def pre_delete_instance( self, request: bigtable_instance_admin.DeleteInstanceRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.DeleteInstanceRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.DeleteInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_instance @@ -349,8 +368,11 @@ def pre_delete_instance( def pre_get_app_profile( self, request: bigtable_instance_admin.GetAppProfileRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.GetAppProfileRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_app_profile Override in a subclass to manipulate the request or metadata @@ -372,8 +394,11 @@ def post_get_app_profile( def pre_get_cluster( self, request: bigtable_instance_admin.GetClusterRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.GetClusterRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_cluster Override in a subclass to manipulate the request or metadata @@ -393,8 +418,10 @@ def post_get_cluster(self, response: instance.Cluster) -> instance.Cluster: def pre_get_iam_policy( self, request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_iam_policy Override in a subclass to manipulate the request or metadata @@ -414,8 +441,11 @@ def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: def pre_get_instance( self, request: bigtable_instance_admin.GetInstanceRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.GetInstanceRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.GetInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for get_instance Override in a subclass to manipulate the request or metadata @@ -435,9 +465,10 @@ def post_get_instance(self, response: 
instance.Instance) -> instance.Instance: def pre_list_app_profiles( self, request: bigtable_instance_admin.ListAppProfilesRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.ListAppProfilesRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.ListAppProfilesRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_app_profiles @@ -460,8 +491,11 @@ def post_list_app_profiles( def pre_list_clusters( self, request: bigtable_instance_admin.ListClustersRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.ListClustersRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListClustersRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_clusters Override in a subclass to manipulate the request or metadata @@ -483,9 +517,10 @@ def post_list_clusters( def pre_list_hot_tablets( self, request: bigtable_instance_admin.ListHotTabletsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.ListHotTabletsRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.ListHotTabletsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_hot_tablets @@ -508,8 +543,11 @@ def post_list_hot_tablets( def pre_list_instances( self, request: bigtable_instance_admin.ListInstancesRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_instance_admin.ListInstancesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_instance_admin.ListInstancesRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_instances Override in a subclass to manipulate the request or metadata @@ -531,9 +569,10 @@ def post_list_instances( def pre_partial_update_cluster( self, request: bigtable_instance_admin.PartialUpdateClusterRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.PartialUpdateClusterRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.PartialUpdateClusterRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for partial_update_cluster @@ -556,9 +595,10 @@ def post_partial_update_cluster( def pre_partial_update_instance( self, request: bigtable_instance_admin.PartialUpdateInstanceRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.PartialUpdateInstanceRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.PartialUpdateInstanceRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for partial_update_instance @@ -581,8 +621,10 @@ def post_partial_update_instance( def pre_set_iam_policy( self, request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for set_iam_policy Override in a subclass to manipulate the request or metadata @@ -602,8 +644,11 @@ def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: def 
pre_test_iam_permissions( self, request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for test_iam_permissions Override in a subclass to manipulate the request or metadata @@ -625,9 +670,10 @@ def post_test_iam_permissions( def pre_update_app_profile( self, request: bigtable_instance_admin.UpdateAppProfileRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_instance_admin.UpdateAppProfileRequest, Sequence[Tuple[str, str]] + bigtable_instance_admin.UpdateAppProfileRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for update_app_profile @@ -648,8 +694,10 @@ def post_update_app_profile( return response def pre_update_cluster( - self, request: instance.Cluster, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[instance.Cluster, Sequence[Tuple[str, str]]]: + self, + request: instance.Cluster, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Cluster, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_cluster Override in a subclass to manipulate the request or metadata @@ -669,8 +717,10 @@ def post_update_cluster( return response def pre_update_instance( - self, request: instance.Instance, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[instance.Instance, Sequence[Tuple[str, str]]]: + self, + request: instance.Instance, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[instance.Instance, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for update_instance Override in a subclass to manipulate the request or metadata @@ -866,7 +916,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Call the create app profile method over HTTP. @@ -877,8 +927,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.instance.AppProfile: @@ -891,6 +943,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseCreateAppProfile._get_http_options() ) + request, metadata = self._interceptor.pre_create_app_profile( request, metadata ) @@ -907,6 +960,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._CreateAppProfile._get_response( @@ -930,7 +1010,29 @@ def __call__( pb_resp = instance.AppProfile.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_app_profile(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.AppProfile.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_app_profile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateAppProfile", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateCluster( @@ -969,7 +1071,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create cluster method over HTTP. @@ -980,8 +1082,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -994,6 +1098,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_http_options() ) + request, metadata = self._interceptor.pre_create_cluster(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateCluster._get_transcoded_request( http_options, request @@ -1008,6 +1113,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._CreateCluster._get_response( self._host, @@ -1027,7 +1159,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_cluster(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateInstance( @@ -1066,7 +1220,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create instance method over HTTP. @@ -1077,8 +1231,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1091,6 +1247,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_http_options() ) + request, metadata = self._interceptor.pre_create_instance(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseCreateInstance._get_transcoded_request( http_options, request @@ -1105,6 +1262,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.CreateInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._CreateInstance._get_response( self._host, @@ -1124,7 +1308,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_instance(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.create_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "CreateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _DeleteAppProfile( @@ -1162,7 +1368,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete app profile method over HTTP. @@ -1173,13 +1379,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseDeleteAppProfile._get_http_options() ) + request, metadata = self._interceptor.pre_delete_app_profile( request, metadata ) @@ -1192,6 +1401,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._DeleteAppProfile._get_response( @@ -1244,7 +1480,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete cluster method over HTTP. @@ -1255,13 +1491,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_http_options() ) + request, metadata = self._interceptor.pre_delete_cluster(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteCluster._get_transcoded_request( http_options, request @@ -1272,6 +1511,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._DeleteCluster._get_response( self._host, @@ -1322,7 +1588,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete instance method over HTTP. 
@@ -1333,13 +1599,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_http_options() ) + request, metadata = self._interceptor.pre_delete_instance(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseDeleteInstance._get_transcoded_request( http_options, request @@ -1350,6 +1619,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.DeleteInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "DeleteInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._DeleteInstance._get_response( self._host, @@ -1400,7 +1696,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.AppProfile: r"""Call the get app profile method over HTTP. @@ -1411,8 +1707,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.instance.AppProfile: @@ -1425,6 +1723,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_http_options() ) + request, metadata = self._interceptor.pre_get_app_profile(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetAppProfile._get_transcoded_request( http_options, request @@ -1435,6 +1734,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._GetAppProfile._get_response( self._host, @@ -1455,7 +1781,29 @@ def __call__( pb_resp = instance.AppProfile.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_app_profile(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.AppProfile.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_app_profile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetAppProfile", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetCluster( @@ -1493,7 +1841,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Cluster: r"""Call the get cluster method over HTTP. @@ -1504,8 +1852,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.instance.Cluster: @@ -1519,6 +1869,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_http_options() ) + request, metadata = self._interceptor.pre_get_cluster(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetCluster._get_transcoded_request( http_options, request @@ -1529,6 +1880,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._GetCluster._get_response( self._host, @@ -1549,7 +1927,29 @@ def __call__( pb_resp = instance.Cluster.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_cluster(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.Cluster.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetIamPolicy( @@ -1588,7 +1988,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the get iam policy method over HTTP. @@ -1598,8 +1998,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
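The REST handlers above attach request and response details through `extra=`, so they arrive as attributes on the emitted LogRecord rather than in the message text. A small custom handler can read them back; the attribute names below are simply the dict keys used in the `_LOGGER.debug` calls, and the handler itself is an illustrative sketch rather than anything shipped by the library.

# Sketch of recovering the structured fields attached via `extra=` above.
import json
import logging


class StructuredConsoleHandler(logging.Handler):
    """Prints the log message plus any structured fields set via `extra=`."""

    def emit(self, record: logging.LogRecord) -> None:
        details = {
            key: getattr(record, key)
            for key in ("rpcName", "httpRequest", "httpResponse", "request", "response")
            if hasattr(record, key)
        }
        print(record.getMessage(), json.dumps(details, default=str))


logging.getLogger("google.cloud.bigtable_admin_v2").addHandler(StructuredConsoleHandler())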
Returns: ~.policy_pb2.Policy: @@ -1684,6 +2086,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( http_options, request @@ -1698,6 +2101,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._GetIamPolicy._get_response( self._host, @@ -1719,7 +2149,29 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetInstance( @@ -1757,7 +2209,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Call the get instance method over HTTP. @@ -1768,8 +2220,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
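
Note that the logging hunks serialize payloads through two different helpers: proto-plus messages (the Bigtable admin request and response types) go through the wrapper's `to_json`, while plain protobuf messages such as the IAM policy types go through `google.protobuf.json_format.MessageToJson`. A short sketch of the distinction, assuming the type modules referenced elsewhere in this diff; the resource names are placeholders.

```python
from google.iam.v1 import iam_policy_pb2
from google.protobuf import json_format

from google.cloud.bigtable_admin_v2.types import instance

# Proto-plus wrappers expose to_json on the wrapper type itself, which is the
# type(request).to_json(request) form used in the GetAppProfile/GetCluster hunks.
app_profile = instance.AppProfile(name="projects/p/instances/i/appProfiles/ap")
print(type(app_profile).to_json(app_profile))

# Raw protobuf messages (policy_pb2, iam_policy_pb2) have no such helper, so the
# GetIamPolicy hunk falls back to json_format.MessageToJson.
iam_request = iam_policy_pb2.GetIamPolicyRequest(resource="projects/p/instances/i")
print(json_format.MessageToJson(iam_request))
```
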
Returns: ~.instance.Instance: @@ -1785,6 +2239,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_http_options() ) + request, metadata = self._interceptor.pre_get_instance(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseGetInstance._get_transcoded_request( http_options, request @@ -1795,6 +2250,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.GetInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._GetInstance._get_response( self._host, @@ -1815,7 +2297,29 @@ def __call__( pb_resp = instance.Instance.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_instance(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.Instance.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.get_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "GetInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListAppProfiles( @@ -1853,7 +2357,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListAppProfilesResponse: r"""Call the list app profiles method over HTTP. @@ -1864,8 +2368,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListAppProfilesResponse: @@ -1877,6 +2383,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseListAppProfiles._get_http_options() ) + request, metadata = self._interceptor.pre_list_app_profiles( request, metadata ) @@ -1889,6 +2396,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListAppProfiles", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListAppProfiles", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._ListAppProfiles._get_response( @@ -1911,7 +2445,33 @@ def __call__( pb_resp = bigtable_instance_admin.ListAppProfilesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_app_profiles(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListAppProfilesResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_app_profiles", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListAppProfiles", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListClusters( @@ -1949,7 +2509,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListClustersResponse: r"""Call the list clusters method over HTTP. @@ -1960,8 +2520,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListClustersResponse: @@ -1973,6 +2535,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_http_options() ) + request, metadata = self._interceptor.pre_list_clusters(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListClusters._get_transcoded_request( http_options, request @@ -1983,6 +2546,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListClusters", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListClusters", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._ListClusters._get_response( self._host, @@ -2003,7 +2593,31 @@ def __call__( pb_resp = bigtable_instance_admin.ListClustersResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_clusters(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListClustersResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_clusters", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListClusters", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListHotTablets( @@ -2041,7 +2655,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListHotTabletsResponse: r"""Call the list hot tablets method over HTTP. @@ -2052,8 +2666,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_instance_admin.ListHotTabletsResponse: @@ -2065,6 +2681,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseListHotTablets._get_http_options() ) + request, metadata = self._interceptor.pre_list_hot_tablets( request, metadata ) @@ -2077,6 +2694,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListHotTablets", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListHotTablets", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._ListHotTablets._get_response( self._host, @@ -2097,7 +2741,31 @@ def __call__( pb_resp = bigtable_instance_admin.ListHotTabletsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_hot_tablets(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListHotTabletsResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_hot_tablets", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListHotTablets", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListInstances( @@ -2135,7 +2803,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_instance_admin.ListInstancesResponse: r"""Call the list instances method over HTTP. @@ -2146,8 +2814,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
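
These hunks also thread every REST call through the transport interceptor's `pre_*`/`post_*` hooks (for example `pre_list_hot_tablets` above), so request metadata can be adjusted and parsed responses inspected without touching the generated code. A sketch of a custom interceptor; the hook names mirror the calls in the hunks, while the `interceptor=` constructor wiring and use of Application Default Credentials are assumptions about the generated transport's usual signature.

```python
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import (
    BigtableInstanceAdminClient,
)
from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.rest import (
    BigtableInstanceAdminRestInterceptor,
    BigtableInstanceAdminRestTransport,
)


class AuditInterceptor(BigtableInstanceAdminRestInterceptor):
    """Illustrative interceptor; header key and value are hypothetical."""

    def pre_list_hot_tablets(self, request, metadata):
        # Whatever is returned here is what the transport sends (and logs).
        metadata = list(metadata) + [("x-audit-origin", "ops-dashboard")]
        return request, metadata

    def post_list_hot_tablets(self, response):
        # Runs after json_format.Parse has populated the response message.
        return response


# Assumes Application Default Credentials are available in the environment.
transport = BigtableInstanceAdminRestTransport(interceptor=AuditInterceptor())
client = BigtableInstanceAdminClient(transport=transport)
```
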
Returns: ~.bigtable_instance_admin.ListInstancesResponse: @@ -2159,6 +2829,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_http_options() ) + request, metadata = self._interceptor.pre_list_instances(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseListInstances._get_transcoded_request( http_options, request @@ -2169,6 +2840,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.ListInstances", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListInstances", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._ListInstances._get_response( self._host, @@ -2189,7 +2887,31 @@ def __call__( pb_resp = bigtable_instance_admin.ListInstancesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_instances(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_instance_admin.ListInstancesResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.list_instances", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "ListInstances", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _PartialUpdateCluster( @@ -2228,7 +2950,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the partial update cluster method over HTTP. @@ -2239,8 +2961,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2253,6 +2977,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateCluster._get_http_options() ) + request, metadata = self._interceptor.pre_partial_update_cluster( request, metadata ) @@ -2269,6 +2994,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.PartialUpdateCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._PartialUpdateCluster._get_response( @@ -2290,7 +3042,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_cluster(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.partial_update_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _PartialUpdateInstance( @@ -2329,7 +3103,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the partial update instance method over HTTP. @@ -2340,8 +3114,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2354,6 +3130,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BasePartialUpdateInstance._get_http_options() ) + request, metadata = self._interceptor.pre_partial_update_instance( request, metadata ) @@ -2370,6 +3147,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.PartialUpdateInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._PartialUpdateInstance._get_response( @@ -2391,7 +3195,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_partial_update_instance(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.partial_update_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "PartialUpdateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _SetIamPolicy( @@ -2430,7 +3256,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the set iam policy method over HTTP. @@ -2440,8 +3266,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.policy_pb2.Policy: @@ -2526,6 +3354,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( http_options, request @@ -2540,6 +3369,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.SetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._SetIamPolicy._get_response( self._host, @@ -2561,7 +3417,29 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.set_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "SetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _TestIamPermissions( @@ -2600,7 +3478,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Call the test iam permissions method over HTTP. @@ -2610,8 +3488,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
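
Because the `extra={...}` dictionaries above become attributes on the emitted `LogRecord`, a handler or formatter can consume the structured fields (`serviceName`, `rpcName`, `httpRequest`, `httpResponse`) directly rather than re-parsing the message text. A minimal sketch of such a formatter; the field names come from the hunks above, everything else is illustrative.

```python
import json
import logging


class RpcDebugFormatter(logging.Formatter):
    """Formats the structured fields attached via `extra` as one JSON line."""

    def format(self, record):
        entry = {"message": record.getMessage()}
        # Only present on the client/transport DEBUG records added in this change.
        for field in ("serviceName", "rpcName", "httpRequest", "httpResponse"):
            if hasattr(record, field):
                entry[field] = getattr(record, field)
        return json.dumps(entry, default=str)


handler = logging.StreamHandler()
handler.setFormatter(RpcDebugFormatter())
logging.getLogger("google.cloud.bigtable_admin_v2").addHandler(handler)
logging.getLogger("google.cloud.bigtable_admin_v2").setLevel(logging.DEBUG)
```
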
Returns: ~.iam_policy_pb2.TestIamPermissionsResponse: @@ -2621,6 +3501,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseTestIamPermissions._get_http_options() ) + request, metadata = self._interceptor.pre_test_iam_permissions( request, metadata ) @@ -2637,6 +3518,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.TestIamPermissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._TestIamPermissions._get_response( @@ -2660,7 +3568,29 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_test_iam_permissions(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.test_iam_permissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "TestIamPermissions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateAppProfile( @@ -2699,7 +3629,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update app profile method over HTTP. @@ -2710,8 +3640,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2724,6 +3656,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseUpdateAppProfile._get_http_options() ) + request, metadata = self._interceptor.pre_update_app_profile( request, metadata ) @@ -2740,6 +3673,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateAppProfile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateAppProfile", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableInstanceAdminRestTransport._UpdateAppProfile._get_response( @@ -2761,7 +3721,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_app_profile(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_app_profile", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateAppProfile", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateCluster( @@ -2800,7 +3782,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update cluster method over HTTP. @@ -2813,8 +3795,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -2827,6 +3811,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_http_options() ) + request, metadata = self._interceptor.pre_update_cluster(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateCluster._get_transcoded_request( http_options, request @@ -2841,6 +3826,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateCluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateCluster", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._UpdateCluster._get_response( self._host, @@ -2860,7 +3872,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_cluster(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_cluster", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateCluster", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateInstance( @@ -2899,7 +3933,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> instance.Instance: r"""Call the update instance method over HTTP. @@ -2914,8 +3948,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.instance.Instance: @@ -2931,6 +3967,7 @@ def __call__( http_options = ( _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_http_options() ) + request, metadata = self._interceptor.pre_update_instance(request, metadata) transcoded_request = _BaseBigtableInstanceAdminRestTransport._BaseUpdateInstance._get_transcoded_request( http_options, request @@ -2945,6 +3982,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableInstanceAdminClient.UpdateInstance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateInstance", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableInstanceAdminRestTransport._UpdateInstance._get_response( self._host, @@ -2966,7 +4030,29 @@ def __call__( pb_resp = instance.Instance.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_instance(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = instance.Instance.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableInstanceAdminClient.update_instance", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableInstanceAdmin", + "rpcName": "UpdateInstance", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 2e9eb13eb..ba644311c 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import logging as std_logging from collections import OrderedDict import re from typing import ( @@ -57,6 +58,15 @@ from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport from .client import BigtableTableAdminClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class BigtableTableAdminAsyncClient: """Service for creating, configuring, and deleting Cloud @@ -289,6 +299,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableTableAdminAsyncClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "credentialsType": None, + }, + ) + async def create_table( self, request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, @@ -298,7 +330,7 @@ async def create_table( table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. The table can be created with a full set of initial @@ -333,8 +365,10 @@ async def create_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -405,7 +439,7 @@ async def create_table_from_snapshot( source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new table from the specified snapshot. The target table must not exist. The snapshot and the table @@ -457,8 +491,10 @@ async def create_table_from_snapshot( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
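
The client-creation record added here reports `universeDomain`, `credentialsType`, and `credentialsInfo`, all read defensively with `getattr` so a transport without a `_credentials` attribute still logs a minimal entry. The same standard-logging setup shown earlier applies to the async clients; a short sketch, assuming Application Default Credentials and placeholder project/instance names.

```python
import asyncio
import logging

from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
    BigtableTableAdminAsyncClient,
)

logging.basicConfig(level=logging.DEBUG)


async def main():
    # Constructing the async client emits the
    # "Created client `google.bigtable.admin_v2.BigtableTableAdminAsyncClient`."
    # DEBUG record, including universeDomain and credentialsType when available.
    client = BigtableTableAdminAsyncClient()
    pager = await client.list_tables(
        parent="projects/my-project/instances/my-instance"
    )
    async for table in pager:
        print(table.name)


asyncio.run(main())
```
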
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -534,7 +570,7 @@ async def list_tables( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListTablesAsyncPager: r"""Lists all tables served from a specified instance. @@ -553,8 +589,10 @@ async def list_tables( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager: @@ -629,7 +667,7 @@ async def get_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. @@ -648,8 +686,10 @@ async def get_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -713,7 +753,7 @@ async def update_table( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates a specified table. @@ -750,8 +790,10 @@ async def update_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation_async.AsyncOperation: @@ -827,7 +869,7 @@ async def delete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified table and all of its data. @@ -847,8 +889,10 @@ async def delete_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -902,7 +946,7 @@ async def undelete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Restores a specified table which was accidentally deleted. @@ -922,8 +966,10 @@ async def undelete_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -999,7 +1045,7 @@ async def create_authorized_view( authorized_view_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new AuthorizedView in a table. @@ -1035,8 +1081,10 @@ async def create_authorized_view( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1115,7 +1163,7 @@ async def list_authorized_views( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAuthorizedViewsAsyncPager: r"""Lists all AuthorizedViews from a specific table. 
@@ -1134,8 +1182,10 @@ async def list_authorized_views( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsAsyncPager: @@ -1212,7 +1262,7 @@ async def get_authorized_view( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.AuthorizedView: r"""Gets information from a specified AuthorizedView. @@ -1231,8 +1281,10 @@ async def get_authorized_view( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AuthorizedView: @@ -1300,7 +1352,7 @@ async def update_authorized_view( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Updates an AuthorizedView in a table. @@ -1333,8 +1385,10 @@ async def update_authorized_view( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1413,7 +1467,7 @@ async def delete_authorized_view( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified AuthorizedView. @@ -1432,8 +1486,10 @@ async def delete_authorized_view( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1492,7 +1548,7 @@ async def modify_column_families( ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Performs a series of column family modifications on the specified table. Either all or none of the @@ -1527,8 +1583,10 @@ async def modify_column_families( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -1592,7 +1650,7 @@ async def drop_row_range( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently drop/delete a row range from a specified table. The request can specify whether to delete all @@ -1606,8 +1664,10 @@ async def drop_row_range( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Use the request object if provided (there's no risk of modifying the input as @@ -1647,7 +1707,7 @@ async def generate_consistency_token( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations @@ -1670,8 +1730,10 @@ async def generate_consistency_token( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: @@ -1737,7 +1799,7 @@ async def check_consistency( consistency_token: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Checks replication consistency based on a consistency token, that is, if replication has caught up based on @@ -1766,8 +1828,10 @@ async def check_consistency( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: @@ -1835,7 +1899,7 @@ async def snapshot_table( description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table @@ -1893,8 +1957,10 @@ async def snapshot_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -1979,7 +2045,7 @@ async def get_snapshot( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Snapshot: r"""Gets metadata information about the specified snapshot. @@ -2012,8 +2078,10 @@ async def get_snapshot( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.bigtable_admin_v2.types.Snapshot: @@ -2087,7 +2155,7 @@ async def list_snapshots( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListSnapshotsAsyncPager: r"""Lists all snapshots associated with the specified cluster. @@ -2123,8 +2191,10 @@ async def list_snapshots( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager: @@ -2208,7 +2278,7 @@ async def delete_snapshot( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes the specified snapshot. @@ -2241,8 +2311,10 @@ async def delete_snapshot( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2296,7 +2368,7 @@ async def create_backup( backup: Optional[table.Backup] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Starts creating a new Cloud Bigtable Backup. The returned backup [long-running operation][google.longrunning.Operation] can be @@ -2341,8 +2413,10 @@ async def create_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation_async.AsyncOperation: @@ -2418,7 +2492,7 @@ async def get_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Gets metadata on a pending or completed Cloud Bigtable Backup. @@ -2437,8 +2511,10 @@ async def get_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Backup: @@ -2498,7 +2574,7 @@ async def update_backup( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. @@ -2532,8 +2608,10 @@ async def update_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Backup: @@ -2596,7 +2674,7 @@ async def delete_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. @@ -2615,8 +2693,10 @@ async def delete_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2668,7 +2748,7 @@ async def list_backups( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupsAsyncPager: r"""Lists Cloud Bigtable backups. Returns both completed and pending backups. 
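Illustrative sketch (not part of the diff): the recurring `metadata` change above widens the accepted value type so that binary gRPC metadata can be sent alongside string metadata. Assuming Application Default Credentials, placeholder resource names, and a hypothetical `x-example-trace-bin` key, a caller of the async table admin client might pass it like this:

    import asyncio

    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
        BigtableTableAdminAsyncClient,
    )

    async def main():
        client = BigtableTableAdminAsyncClient()
        pager = await client.list_backups(
            parent="projects/my-project/instances/my-instance/clusters/my-cluster",
            metadata=[
                ("x-example-routing", "primary"),          # ordinary keys carry str values
                ("x-example-trace-bin", b"\x01\x02\x03"),  # keys ending in "-bin" carry bytes values
            ],
        )
        async for backup in pager:
            print(backup.name)

    asyncio.run(main())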
@@ -2691,8 +2771,10 @@ async def list_backups( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: @@ -2766,7 +2848,7 @@ async def restore_table( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Create a new table by restoring from a completed backup. The returned table [long-running @@ -2784,8 +2866,10 @@ async def restore_table( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2846,7 +2930,7 @@ async def copy_backup( expire_time: Optional[timestamp_pb2.Timestamp] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation_async.AsyncOperation: r"""Copy a Cloud Bigtable backup to a new backup in the destination cluster located in the destination instance @@ -2903,8 +2987,10 @@ async def copy_backup( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation_async.AsyncOperation: @@ -2982,7 +3068,7 @@ async def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup resource. Returns an empty policy if the resource exists @@ -3003,8 +3089,10 @@ async def get_iam_policy( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -3090,7 +3178,7 @@ async def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup resource. Replaces any existing policy. @@ -3110,8 +3198,10 @@ async def set_iam_policy( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -3198,7 +3288,7 @@ async def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified Table or Backup resource. @@ -3227,8 +3317,10 @@ async def test_iam_permissions( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index 502f0085c..78e1b769a 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -14,6 +14,7 @@ # limitations under the License. 
# from collections import OrderedDict +import logging as std_logging import os import re from typing import ( @@ -48,6 +49,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers @@ -731,6 +741,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. + client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -797,6 +811,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable.admin_v2.BigtableTableAdminClient`.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "credentialsType": None, + }, + ) + def create_table( self, request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, @@ -806,7 +843,7 @@ def create_table( table: Optional[gba_table.Table] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gba_table.Table: r"""Creates a new table in the specified instance. The table can be created with a full set of initial @@ -841,8 +878,10 @@ def create_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -910,7 +949,7 @@ def create_table_from_snapshot( source_snapshot: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a new table from the specified snapshot. The target table must not exist. 
The snapshot and the table @@ -962,8 +1001,10 @@ def create_table_from_snapshot( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1038,7 +1079,7 @@ def list_tables( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListTablesPager: r"""Lists all tables served from a specified instance. @@ -1057,8 +1098,10 @@ def list_tables( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager: @@ -1130,7 +1173,7 @@ def get_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Gets metadata information about the specified table. @@ -1149,8 +1192,10 @@ def get_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -1211,7 +1256,7 @@ def update_table( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates a specified table. @@ -1248,8 +1293,10 @@ def update_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1322,7 +1369,7 @@ def delete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified table and all of its data. @@ -1342,8 +1389,10 @@ def delete_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1394,7 +1443,7 @@ def undelete_table( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Restores a specified table which was accidentally deleted. @@ -1414,8 +1463,10 @@ def undelete_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1488,7 +1539,7 @@ def create_authorized_view( authorized_view_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a new AuthorizedView in a table. @@ -1524,8 +1575,10 @@ def create_authorized_view( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.api_core.operation.Operation: @@ -1601,7 +1654,7 @@ def list_authorized_views( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListAuthorizedViewsPager: r"""Lists all AuthorizedViews from a specific table. @@ -1620,8 +1673,10 @@ def list_authorized_views( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListAuthorizedViewsPager: @@ -1695,7 +1750,7 @@ def get_authorized_view( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.AuthorizedView: r"""Gets information from a specified AuthorizedView. @@ -1714,8 +1769,10 @@ def get_authorized_view( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.AuthorizedView: @@ -1780,7 +1837,7 @@ def update_authorized_view( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Updates an AuthorizedView in a table. @@ -1813,8 +1870,10 @@ def update_authorized_view( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -1890,7 +1949,7 @@ def delete_authorized_view( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes a specified AuthorizedView. 
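Illustrative sketch (not part of the diff): the `__init__` hunk earlier in this file emits the "Created client ..." debug record only when `google-api-core` provides `client_logging` and the module logger is enabled for DEBUG (the guards call `_LOGGER.isEnabledFor(std_logging.DEBUG)` before logging). One way a user might turn that on with the standard library, assuming Application Default Credentials:

    import logging

    from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import (
        BigtableTableAdminClient,
    )

    # Route records to stderr and enable DEBUG so the new isEnabledFor(DEBUG)
    # guards pass and the client-creation record is emitted.
    logging.basicConfig(level=logging.DEBUG)

    client = BigtableTableAdminClient()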
@@ -1909,8 +1968,10 @@ def delete_authorized_view( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -1966,7 +2027,7 @@ def modify_column_families( ] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Performs a series of column family modifications on the specified table. Either all or none of the @@ -2001,8 +2062,10 @@ def modify_column_families( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Table: @@ -2063,7 +2126,7 @@ def drop_row_range( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently drop/delete a row range from a specified table. The request can specify whether to delete all @@ -2077,8 +2140,10 @@ def drop_row_range( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Use the request object if provided (there's no risk of modifying the input as @@ -2116,7 +2181,7 @@ def generate_consistency_token( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Generates a consistency token for a Table, which can be used in CheckConsistency to check whether mutations @@ -2139,8 +2204,10 @@ def generate_consistency_token( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: @@ -2205,7 +2272,7 @@ def check_consistency( consistency_token: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Checks replication consistency based on a consistency token, that is, if replication has caught up based on @@ -2234,8 +2301,10 @@ def check_consistency( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: @@ -2300,7 +2369,7 @@ def snapshot_table( description: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Creates a new snapshot in the specified cluster from the specified source table. The cluster and the table @@ -2358,8 +2427,10 @@ def snapshot_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2441,7 +2512,7 @@ def get_snapshot( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Snapshot: r"""Gets metadata information about the specified snapshot. @@ -2474,8 +2545,10 @@ def get_snapshot( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.cloud.bigtable_admin_v2.types.Snapshot: @@ -2546,7 +2619,7 @@ def list_snapshots( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListSnapshotsPager: r"""Lists all snapshots associated with the specified cluster. @@ -2582,8 +2655,10 @@ def list_snapshots( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager: @@ -2664,7 +2739,7 @@ def delete_snapshot( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Permanently deletes the specified snapshot. @@ -2697,8 +2772,10 @@ def delete_snapshot( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -2749,7 +2826,7 @@ def create_backup( backup: Optional[table.Backup] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Starts creating a new Cloud Bigtable Backup. The returned backup [long-running operation][google.longrunning.Operation] can be @@ -2794,8 +2871,10 @@ def create_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -2868,7 +2947,7 @@ def get_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Gets metadata on a pending or completed Cloud Bigtable Backup. 
@@ -2887,8 +2966,10 @@ def get_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Backup: @@ -2945,7 +3026,7 @@ def update_backup( update_mask: Optional[field_mask_pb2.FieldMask] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Updates a pending or completed Cloud Bigtable Backup. @@ -2979,8 +3060,10 @@ def update_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.types.Backup: @@ -3040,7 +3123,7 @@ def delete_backup( name: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> None: r"""Deletes a pending or completed Cloud Bigtable backup. @@ -3059,8 +3142,10 @@ def delete_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ # Create or coerce a protobuf request object. # - Quick check: If we got a request object, we should *not* have @@ -3109,7 +3194,7 @@ def list_backups( parent: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> pagers.ListBackupsPager: r"""Lists Cloud Bigtable backups. Returns both completed and pending backups. @@ -3132,8 +3217,10 @@ def list_backups( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. 
Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: @@ -3204,7 +3291,7 @@ def restore_table( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Create a new table by restoring from a completed backup. The returned table [long-running @@ -3222,8 +3309,10 @@ def restore_table( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -3282,7 +3371,7 @@ def copy_backup( expire_time: Optional[timestamp_pb2.Timestamp] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operation.Operation: r"""Copy a Cloud Bigtable backup to a new backup in the destination cluster located in the destination instance @@ -3339,8 +3428,10 @@ def copy_backup( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.api_core.operation.Operation: @@ -3415,7 +3506,7 @@ def get_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Gets the access control policy for a Table or Backup resource. Returns an empty policy if the resource exists @@ -3436,8 +3527,10 @@ def get_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: google.iam.v1.policy_pb2.Policy: @@ -3524,7 +3617,7 @@ def set_iam_policy( resource: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Sets the access control policy on a Table or Backup resource. Replaces any existing policy. @@ -3544,8 +3637,10 @@ def set_iam_policy( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.policy_pb2.Policy: @@ -3633,7 +3728,7 @@ def test_iam_permissions( permissions: Optional[MutableSequence[str]] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Returns permissions that the caller has on the specified Table or Backup resource. @@ -3662,8 +3757,10 @@ def test_iam_permissions( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py index 5e20fbc5f..4351a5814 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py @@ -67,7 +67,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -81,8 +81,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) @@ -141,7 +143,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -155,8 +157,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListTablesRequest(request) @@ -219,7 +223,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -233,8 +237,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) @@ -295,7 +301,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -309,8 +315,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListAuthorizedViewsRequest(request) @@ -375,7 +383,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -389,8 +397,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) @@ -449,7 +459,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -463,8 +473,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListSnapshotsRequest(request) @@ -527,7 +539,7 @@ def __init__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiate the pager. @@ -541,8 +553,10 @@ def __init__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) @@ -601,7 +615,7 @@ def __init__( *, retry: OptionalAsyncRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = () + metadata: Sequence[Tuple[str, Union[str, bytes]]] = () ): """Instantiates the pager. @@ -615,8 +629,10 @@ def __init__( retry (google.api_core.retry.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" self._method = method self._request = bigtable_table_admin.ListBackupsRequest(request) diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index 8b0eadbbc..59c701b8f 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -22,8 +25,11 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin from google.cloud.bigtable_admin_v2.types import table @@ -34,6 +40,81 @@ from google.protobuf import empty_pb2 # type: ignore from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": 
"google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableTableAdminGrpcTransport(BigtableTableAdminTransport): """gRPC backend transport for BigtableTableAdmin. @@ -192,7 +273,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -256,7 +342,9 @@ def operations_client(self) -> operations_v1.OperationsClient: """ # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient(self.grpc_channel) + self._operations_client = operations_v1.OperationsClient( + self._logged_channel + ) # Return the client from cache. return self._operations_client @@ -282,7 +370,7 @@ def create_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table" not in self._stubs: - self._stubs["create_table"] = self.grpc_channel.unary_unary( + self._stubs["create_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", request_serializer=bigtable_table_admin.CreateTableRequest.serialize, response_deserializer=gba_table.Table.deserialize, @@ -319,7 +407,9 @@ def create_table_from_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table_from_snapshot" not in self._stubs: - self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + self._stubs[ + "create_table_from_snapshot" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -348,7 +438,7 @@ def list_tables( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_tables" not in self._stubs: - self._stubs["list_tables"] = self.grpc_channel.unary_unary( + self._stubs["list_tables"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", request_serializer=bigtable_table_admin.ListTablesRequest.serialize, response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, @@ -374,7 +464,7 @@ def get_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_table" not in self._stubs: - self._stubs["get_table"] = self.grpc_channel.unary_unary( + self._stubs["get_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", request_serializer=bigtable_table_admin.GetTableRequest.serialize, response_deserializer=table.Table.deserialize, @@ -400,7 +490,7 @@ def update_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_table" not in self._stubs: - self._stubs["update_table"] = self.grpc_channel.unary_unary( + self._stubs["update_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -427,7 +517,7 @@ def delete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_table" not in self._stubs: - self._stubs["delete_table"] = self.grpc_channel.unary_unary( + self._stubs["delete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -456,7 +546,7 @@ def undelete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "undelete_table" not in self._stubs: - self._stubs["undelete_table"] = self.grpc_channel.unary_unary( + self._stubs["undelete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -484,7 +574,7 @@ def create_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_authorized_view" not in self._stubs: - self._stubs["create_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["create_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -513,7 +603,7 @@ def list_authorized_views( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_authorized_views" not in self._stubs: - self._stubs["list_authorized_views"] = self.grpc_channel.unary_unary( + self._stubs["list_authorized_views"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, @@ -541,7 +631,7 @@ def get_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_authorized_view" not in self._stubs: - self._stubs["get_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["get_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, response_deserializer=table.AuthorizedView.deserialize, @@ -569,7 +659,7 @@ def update_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "update_authorized_view" not in self._stubs: - self._stubs["update_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["update_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -595,7 +685,7 @@ def delete_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_authorized_view" not in self._stubs: - self._stubs["delete_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["delete_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -625,7 +715,7 @@ def modify_column_families( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "modify_column_families" not in self._stubs: - self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + self._stubs["modify_column_families"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, response_deserializer=table.Table.deserialize, @@ -654,7 +744,7 @@ def drop_row_range( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "drop_row_range" not in self._stubs: - self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + self._stubs["drop_row_range"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -687,7 +777,9 @@ def generate_consistency_token( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "generate_consistency_token" not in self._stubs: - self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + self._stubs[ + "generate_consistency_token" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, @@ -719,7 +811,7 @@ def check_consistency( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "check_consistency" not in self._stubs: - self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + self._stubs["check_consistency"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, @@ -756,7 +848,7 @@ def snapshot_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "snapshot_table" not in self._stubs: - self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + self._stubs["snapshot_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -789,7 +881,7 @@ def get_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_snapshot" not in self._stubs: - self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["get_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, response_deserializer=table.Snapshot.deserialize, @@ -825,7 +917,7 @@ def list_snapshots( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_snapshots" not in self._stubs: - self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + self._stubs["list_snapshots"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, @@ -858,7 +950,7 @@ def delete_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_snapshot" not in self._stubs: - self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["delete_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -892,7 +984,7 @@ def create_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self.grpc_channel.unary_unary( + self._stubs["create_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -919,7 +1011,7 @@ def get_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self.grpc_channel.unary_unary( + self._stubs["get_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", request_serializer=bigtable_table_admin.GetBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -945,7 +1037,7 @@ def update_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self.grpc_channel.unary_unary( + self._stubs["update_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -971,7 +1063,7 @@ def delete_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -1001,7 +1093,7 @@ def list_backups( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self.grpc_channel.unary_unary( + self._stubs["list_backups"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, @@ -1034,7 +1126,7 @@ def restore_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "restore_table" not in self._stubs: - self._stubs["restore_table"] = self.grpc_channel.unary_unary( + self._stubs["restore_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1062,7 +1154,7 @@ def copy_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + self._stubs["copy_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1090,7 +1182,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -1117,7 +1209,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -1147,7 +1239,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -1155,7 +1247,7 @@ def test_iam_permissions( return self._stubs["test_iam_permissions"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index 520c7c83c..5b62722f0 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. # import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -24,8 +27,11 @@ from google.api_core import operations_v1 from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin @@ -38,6 +44,82 @@ from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableTableAdminGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata 
+ else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport): """gRPC AsyncIO backend transport for BigtableTableAdmin. @@ -239,10 +321,13 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -265,7 +350,7 @@ def operations_client(self) -> operations_v1.OperationsAsyncClient: # Quick check: Only create a new client if we do not already have one. if self._operations_client is None: self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel + self._logged_channel ) # Return the client from cache. @@ -294,7 +379,7 @@ def create_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table" not in self._stubs: - self._stubs["create_table"] = self.grpc_channel.unary_unary( + self._stubs["create_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable", request_serializer=bigtable_table_admin.CreateTableRequest.serialize, response_deserializer=gba_table.Table.deserialize, @@ -332,7 +417,9 @@ def create_table_from_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_table_from_snapshot" not in self._stubs: - self._stubs["create_table_from_snapshot"] = self.grpc_channel.unary_unary( + self._stubs[ + "create_table_from_snapshot" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot", request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -361,7 +448,7 @@ def list_tables( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_tables" not in self._stubs: - self._stubs["list_tables"] = self.grpc_channel.unary_unary( + self._stubs["list_tables"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListTables", request_serializer=bigtable_table_admin.ListTablesRequest.serialize, response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, @@ -387,7 +474,7 @@ def get_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
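The asyncio interceptor above attaches the same structured fields (`serviceName`, `rpcName`, request and response payloads) through the `extra` mapping, which the default logging formatter does not display. One possible way to surface them, assuming only the field names taken from this diff (the formatter itself is an illustrative sketch, not part of the change):

import logging

class RpcExtraFormatter(logging.Formatter):
    # Append the interceptor-provided extras to each formatted record when present.
    def format(self, record):
        base = super().format(record)
        svc = getattr(record, "serviceName", None)
        rpc = getattr(record, "rpcName", None)
        return f"{base} [service={svc} rpc={rpc}]" if (svc or rpc) else base

handler = logging.StreamHandler()
handler.setFormatter(RpcExtraFormatter("%(levelname)s %(name)s: %(message)s"))
admin_logger = logging.getLogger("google.cloud.bigtable_admin_v2")
admin_logger.addHandler(handler)
admin_logger.setLevel(logging.DEBUG)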
if "get_table" not in self._stubs: - self._stubs["get_table"] = self.grpc_channel.unary_unary( + self._stubs["get_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetTable", request_serializer=bigtable_table_admin.GetTableRequest.serialize, response_deserializer=table.Table.deserialize, @@ -415,7 +502,7 @@ def update_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_table" not in self._stubs: - self._stubs["update_table"] = self.grpc_channel.unary_unary( + self._stubs["update_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable", request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -444,7 +531,7 @@ def delete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_table" not in self._stubs: - self._stubs["delete_table"] = self.grpc_channel.unary_unary( + self._stubs["delete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable", request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -473,7 +560,7 @@ def undelete_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "undelete_table" not in self._stubs: - self._stubs["undelete_table"] = self.grpc_channel.unary_unary( + self._stubs["undelete_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable", request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -502,7 +589,7 @@ def create_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_authorized_view" not in self._stubs: - self._stubs["create_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["create_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateAuthorizedView", request_serializer=bigtable_table_admin.CreateAuthorizedViewRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -531,7 +618,7 @@ def list_authorized_views( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_authorized_views" not in self._stubs: - self._stubs["list_authorized_views"] = self.grpc_channel.unary_unary( + self._stubs["list_authorized_views"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListAuthorizedViews", request_serializer=bigtable_table_admin.ListAuthorizedViewsRequest.serialize, response_deserializer=bigtable_table_admin.ListAuthorizedViewsResponse.deserialize, @@ -559,7 +646,7 @@ def get_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_authorized_view" not in self._stubs: - self._stubs["get_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["get_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetAuthorizedView", request_serializer=bigtable_table_admin.GetAuthorizedViewRequest.serialize, response_deserializer=table.AuthorizedView.deserialize, @@ -588,7 +675,7 @@ def update_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_authorized_view" not in self._stubs: - self._stubs["update_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["update_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateAuthorizedView", request_serializer=bigtable_table_admin.UpdateAuthorizedViewRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -616,7 +703,7 @@ def delete_authorized_view( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_authorized_view" not in self._stubs: - self._stubs["delete_authorized_view"] = self.grpc_channel.unary_unary( + self._stubs["delete_authorized_view"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteAuthorizedView", request_serializer=bigtable_table_admin.DeleteAuthorizedViewRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -648,7 +735,7 @@ def modify_column_families( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "modify_column_families" not in self._stubs: - self._stubs["modify_column_families"] = self.grpc_channel.unary_unary( + self._stubs["modify_column_families"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies", request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, response_deserializer=table.Table.deserialize, @@ -679,7 +766,7 @@ def drop_row_range( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "drop_row_range" not in self._stubs: - self._stubs["drop_row_range"] = self.grpc_channel.unary_unary( + self._stubs["drop_row_range"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange", request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -712,7 +799,9 @@ def generate_consistency_token( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "generate_consistency_token" not in self._stubs: - self._stubs["generate_consistency_token"] = self.grpc_channel.unary_unary( + self._stubs[ + "generate_consistency_token" + ] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken", request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, @@ -744,7 +833,7 @@ def check_consistency( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "check_consistency" not in self._stubs: - self._stubs["check_consistency"] = self.grpc_channel.unary_unary( + self._stubs["check_consistency"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency", request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, @@ -781,7 +870,7 @@ def snapshot_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "snapshot_table" not in self._stubs: - self._stubs["snapshot_table"] = self.grpc_channel.unary_unary( + self._stubs["snapshot_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable", request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -814,7 +903,7 @@ def get_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_snapshot" not in self._stubs: - self._stubs["get_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["get_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot", request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, response_deserializer=table.Snapshot.deserialize, @@ -850,7 +939,7 @@ def list_snapshots( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_snapshots" not in self._stubs: - self._stubs["list_snapshots"] = self.grpc_channel.unary_unary( + self._stubs["list_snapshots"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots", request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, @@ -885,7 +974,7 @@ def delete_snapshot( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_snapshot" not in self._stubs: - self._stubs["delete_snapshot"] = self.grpc_channel.unary_unary( + self._stubs["delete_snapshot"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot", request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -921,7 +1010,7 @@ def create_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "create_backup" not in self._stubs: - self._stubs["create_backup"] = self.grpc_channel.unary_unary( + self._stubs["create_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup", request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -948,7 +1037,7 @@ def get_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "get_backup" not in self._stubs: - self._stubs["get_backup"] = self.grpc_channel.unary_unary( + self._stubs["get_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup", request_serializer=bigtable_table_admin.GetBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -974,7 +1063,7 @@ def update_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "update_backup" not in self._stubs: - self._stubs["update_backup"] = self.grpc_channel.unary_unary( + self._stubs["update_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup", request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, response_deserializer=table.Backup.deserialize, @@ -1002,7 +1091,7 @@ def delete_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "delete_backup" not in self._stubs: - self._stubs["delete_backup"] = self.grpc_channel.unary_unary( + self._stubs["delete_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup", request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, response_deserializer=empty_pb2.Empty.FromString, @@ -1032,7 +1121,7 @@ def list_backups( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "list_backups" not in self._stubs: - self._stubs["list_backups"] = self.grpc_channel.unary_unary( + self._stubs["list_backups"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups", request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, @@ -1067,7 +1156,7 @@ def restore_table( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "restore_table" not in self._stubs: - self._stubs["restore_table"] = self.grpc_channel.unary_unary( + self._stubs["restore_table"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable", request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1097,7 +1186,7 @@ def copy_backup( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "copy_backup" not in self._stubs: - self._stubs["copy_backup"] = self.grpc_channel.unary_unary( + self._stubs["copy_backup"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup", request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, response_deserializer=operations_pb2.Operation.FromString, @@ -1125,7 +1214,7 @@ def get_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "get_iam_policy" not in self._stubs: - self._stubs["get_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["get_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy", request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -1152,7 +1241,7 @@ def set_iam_policy( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "set_iam_policy" not in self._stubs: - self._stubs["set_iam_policy"] = self.grpc_channel.unary_unary( + self._stubs["set_iam_policy"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy", request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, response_deserializer=policy_pb2.Policy.FromString, @@ -1182,7 +1271,7 @@ def test_iam_permissions( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "test_iam_permissions" not in self._stubs: - self._stubs["test_iam_permissions"] = self.grpc_channel.unary_unary( + self._stubs["test_iam_permissions"] = self._logged_channel.unary_unary( "/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions", request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, @@ -1450,7 +1539,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index b25ddec60..75cd43b43 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. # +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries @@ -49,6 +50,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -301,8 +310,11 @@ def post_update_table(self, response): def pre_check_consistency( self, request: bigtable_table_admin.CheckConsistencyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CheckConsistencyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CheckConsistencyRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for check_consistency Override in a subclass to manipulate the request or metadata @@ -324,8 +336,10 @@ def post_check_consistency( def pre_copy_backup( self, request: bigtable_table_admin.CopyBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for copy_backup Override in a subclass to manipulate the request or metadata @@ -347,9 +361,10 @@ def 
post_copy_backup( def pre_create_authorized_view( self, request: bigtable_table_admin.CreateAuthorizedViewRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.CreateAuthorizedViewRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.CreateAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_authorized_view @@ -372,8 +387,11 @@ def post_create_authorized_view( def pre_create_backup( self, request: bigtable_table_admin.CreateBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CreateBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CreateBackupRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for create_backup Override in a subclass to manipulate the request or metadata @@ -395,8 +413,10 @@ def post_create_backup( def pre_create_table( self, request: bigtable_table_admin.CreateTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for create_table Override in a subclass to manipulate the request or metadata @@ -416,9 +436,10 @@ def post_create_table(self, response: gba_table.Table) -> gba_table.Table: def pre_create_table_from_snapshot( self, request: bigtable_table_admin.CreateTableFromSnapshotRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.CreateTableFromSnapshotRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.CreateTableFromSnapshotRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for create_table_from_snapshot @@ -441,9 +462,10 @@ def post_create_table_from_snapshot( def pre_delete_authorized_view( self, request: bigtable_table_admin.DeleteAuthorizedViewRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.DeleteAuthorizedViewRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.DeleteAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for delete_authorized_view @@ -455,8 +477,11 @@ def pre_delete_authorized_view( def pre_delete_backup( self, request: bigtable_table_admin.DeleteBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DeleteBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteBackupRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_backup Override in a subclass to manipulate the request or metadata @@ -467,8 +492,11 @@ def pre_delete_backup( def pre_delete_snapshot( self, request: bigtable_table_admin.DeleteSnapshotRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DeleteSnapshotRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteSnapshotRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for delete_snapshot Override in a subclass to manipulate the request or metadata @@ -479,8 +507,10 @@ def 
pre_delete_snapshot( def pre_delete_table( self, request: bigtable_table_admin.DeleteTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for delete_table Override in a subclass to manipulate the request or metadata @@ -491,8 +521,11 @@ def pre_delete_table( def pre_drop_row_range( self, request: bigtable_table_admin.DropRowRangeRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.DropRowRangeRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.DropRowRangeRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for drop_row_range Override in a subclass to manipulate the request or metadata @@ -503,9 +536,10 @@ def pre_drop_row_range( def pre_generate_consistency_token( self, request: bigtable_table_admin.GenerateConsistencyTokenRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.GenerateConsistencyTokenRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.GenerateConsistencyTokenRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for generate_consistency_token @@ -528,9 +562,10 @@ def post_generate_consistency_token( def pre_get_authorized_view( self, request: bigtable_table_admin.GetAuthorizedViewRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.GetAuthorizedViewRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.GetAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for get_authorized_view @@ -553,8 +588,10 @@ def post_get_authorized_view( def pre_get_backup( self, request: bigtable_table_admin.GetBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_backup Override in a subclass to manipulate the request or metadata @@ -574,8 +611,10 @@ def post_get_backup(self, response: table.Backup) -> table.Backup: def pre_get_iam_policy( self, request: iam_policy_pb2.GetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_iam_policy Override in a subclass to manipulate the request or metadata @@ -595,8 +634,10 @@ def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: def pre_get_snapshot( self, request: bigtable_table_admin.GetSnapshotRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_snapshot Override in a subclass to manipulate the request or metadata @@ -616,8 +657,10 @@ 
def post_get_snapshot(self, response: table.Snapshot) -> table.Snapshot: def pre_get_table( self, request: bigtable_table_admin.GetTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for get_table Override in a subclass to manipulate the request or metadata @@ -637,9 +680,10 @@ def post_get_table(self, response: table.Table) -> table.Table: def pre_list_authorized_views( self, request: bigtable_table_admin.ListAuthorizedViewsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.ListAuthorizedViewsRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.ListAuthorizedViewsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for list_authorized_views @@ -662,8 +706,10 @@ def post_list_authorized_views( def pre_list_backups( self, request: bigtable_table_admin.ListBackupsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for list_backups Override in a subclass to manipulate the request or metadata @@ -685,8 +731,11 @@ def post_list_backups( def pre_list_snapshots( self, request: bigtable_table_admin.ListSnapshotsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.ListSnapshotsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListSnapshotsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for list_snapshots Override in a subclass to manipulate the request or metadata @@ -708,8 +757,10 @@ def post_list_snapshots( def pre_list_tables( self, request: bigtable_table_admin.ListTablesRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for list_tables Override in a subclass to manipulate the request or metadata @@ -731,9 +782,10 @@ def post_list_tables( def pre_modify_column_families( self, request: bigtable_table_admin.ModifyColumnFamiliesRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.ModifyColumnFamiliesRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.ModifyColumnFamiliesRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for modify_column_families @@ -754,8 +806,11 @@ def post_modify_column_families(self, response: table.Table) -> table.Table: def pre_restore_table( self, request: bigtable_table_admin.RestoreTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.RestoreTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.RestoreTableRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for restore_table Override in a subclass to manipulate the request 
or metadata @@ -777,8 +832,10 @@ def post_restore_table( def pre_set_iam_policy( self, request: iam_policy_pb2.SetIamPolicyRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for set_iam_policy Override in a subclass to manipulate the request or metadata @@ -798,8 +855,11 @@ def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: def pre_snapshot_table( self, request: bigtable_table_admin.SnapshotTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.SnapshotTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.SnapshotTableRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for snapshot_table Override in a subclass to manipulate the request or metadata @@ -821,8 +881,11 @@ def post_snapshot_table( def pre_test_iam_permissions( self, request: iam_policy_pb2.TestIamPermissionsRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + iam_policy_pb2.TestIamPermissionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for test_iam_permissions Override in a subclass to manipulate the request or metadata @@ -844,8 +907,11 @@ def post_test_iam_permissions( def pre_undelete_table( self, request: bigtable_table_admin.UndeleteTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.UndeleteTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UndeleteTableRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for undelete_table Override in a subclass to manipulate the request or metadata @@ -867,9 +933,10 @@ def post_undelete_table( def pre_update_authorized_view( self, request: bigtable_table_admin.UpdateAuthorizedViewRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable_table_admin.UpdateAuthorizedViewRequest, Sequence[Tuple[str, str]] + bigtable_table_admin.UpdateAuthorizedViewRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for update_authorized_view @@ -892,8 +959,11 @@ def post_update_authorized_view( def pre_update_backup( self, request: bigtable_table_admin.UpdateBackupRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.UpdateBackupRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UpdateBackupRequest, + Sequence[Tuple[str, Union[str, bytes]]], + ]: """Pre-rpc interceptor for update_backup Override in a subclass to manipulate the request or metadata @@ -913,8 +983,10 @@ def post_update_backup(self, response: table.Backup) -> table.Backup: def pre_update_table( self, request: bigtable_table_admin.UpdateTableRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: 
"""Pre-rpc interceptor for update_table Override in a subclass to manipulate the request or metadata @@ -1113,7 +1185,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.CheckConsistencyResponse: r"""Call the check consistency method over HTTP. @@ -1124,8 +1196,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.bigtable_table_admin.CheckConsistencyResponse: @@ -1137,6 +1211,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCheckConsistency._get_http_options() ) + request, metadata = self._interceptor.pre_check_consistency( request, metadata ) @@ -1153,6 +1228,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CheckConsistency", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CheckConsistency", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._CheckConsistency._get_response( self._host, @@ -1174,7 +1276,31 @@ def __call__( pb_resp = bigtable_table_admin.CheckConsistencyResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_consistency(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.CheckConsistencyResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.check_consistency", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CheckConsistency", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CopyBackup( @@ -1212,7 +1338,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the copy backup method over HTTP. @@ -1223,8 +1349,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: ~.operations_pb2.Operation: @@ -1237,6 +1365,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_http_options() ) + request, metadata = self._interceptor.pre_copy_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCopyBackup._get_transcoded_request( http_options, request @@ -1251,6 +1380,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CopyBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CopyBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._CopyBackup._get_response( self._host, @@ -1270,7 +1426,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_copy_backup(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.copy_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CopyBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateAuthorizedView( @@ -1309,7 +1487,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create authorized view method over HTTP. @@ -1320,8 +1498,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
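# A minimal caller-side sketch (not part of this patch) of the contract the
# widened `metadata` annotation documents: values are normally `str`, but a
# key ending in `-bin` may carry `bytes`. Both header names below are
# illustrative only, and the client is assumed to pick up default credentials.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableTableAdminClient()
table = client.get_table(
    name="projects/my-project/instances/my-instance/tables/my-table",
    metadata=[
        ("example-routing-hint", "primary"),  # ordinary str value
        ("example-trace-context-bin", b"\x0a\x02"),  # bytes value for a `-bin` key
    ],
)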
Returns: ~.operations_pb2.Operation: @@ -1334,6 +1514,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCreateAuthorizedView._get_http_options() ) + request, metadata = self._interceptor.pre_create_authorized_view( request, metadata ) @@ -1350,6 +1531,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._CreateAuthorizedView._get_response( @@ -1371,7 +1579,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_authorized_view(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_authorized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateAuthorizedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateBackup( @@ -1410,7 +1640,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create backup method over HTTP. @@ -1421,8 +1651,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1435,6 +1667,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_http_options() ) + request, metadata = self._interceptor.pre_create_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateBackup._get_transcoded_request( http_options, request @@ -1449,6 +1682,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._CreateBackup._get_response( self._host, @@ -1468,7 +1728,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_backup(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateTable( @@ -1507,7 +1789,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> gba_table.Table: r"""Call the create table method over HTTP. @@ -1518,8 +1800,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.gba_table.Table: @@ -1533,6 +1817,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_http_options() ) + request, metadata = self._interceptor.pre_create_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseCreateTable._get_transcoded_request( http_options, request @@ -1547,6 +1832,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._CreateTable._get_response( self._host, @@ -1568,7 +1880,29 @@ def __call__( pb_resp = gba_table.Table.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = gba_table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _CreateTableFromSnapshot( @@ -1607,7 +1941,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the create table from snapshot method over HTTP. @@ -1626,8 +1960,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -1640,6 +1976,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseCreateTableFromSnapshot._get_http_options() ) + request, metadata = self._interceptor.pre_create_table_from_snapshot( request, metadata ) @@ -1656,6 +1993,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.CreateTableFromSnapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTableFromSnapshot", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._CreateTableFromSnapshot._get_response( @@ -1677,7 +2041,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_create_table_from_snapshot(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.create_table_from_snapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "CreateTableFromSnapshot", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _DeleteAuthorizedView( @@ -1715,7 +2101,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete authorized view method over HTTP. @@ -1726,13 +2112,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseBigtableTableAdminRestTransport._BaseDeleteAuthorizedView._get_http_options() ) + request, metadata = self._interceptor.pre_delete_authorized_view( request, metadata ) @@ -1745,6 +2134,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._DeleteAuthorizedView._get_response( @@ -1797,7 +2213,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete backup method over HTTP. @@ -1808,13 +2224,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_http_options() ) + request, metadata = self._interceptor.pre_delete_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteBackup._get_transcoded_request( http_options, request @@ -1825,6 +2244,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._DeleteBackup._get_response( self._host, @@ -1875,7 +2321,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete snapshot method over HTTP. 
@@ -1893,13 +2339,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. """ http_options = ( _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_http_options() ) + request, metadata = self._interceptor.pre_delete_snapshot(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteSnapshot._get_transcoded_request( http_options, request @@ -1910,6 +2359,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteSnapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteSnapshot", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._DeleteSnapshot._get_response( self._host, @@ -1960,7 +2436,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the delete table method over HTTP. @@ -1971,13 +2447,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_http_options() ) + request, metadata = self._interceptor.pre_delete_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDeleteTable._get_transcoded_request( http_options, request @@ -1988,6 +2467,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DeleteTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DeleteTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._DeleteTable._get_response( self._host, @@ -2039,7 +2545,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ): r"""Call the drop row range method over HTTP. @@ -2050,13 +2556,16 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
""" http_options = ( _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_http_options() ) + request, metadata = self._interceptor.pre_drop_row_range(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseDropRowRange._get_transcoded_request( http_options, request @@ -2071,6 +2580,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.DropRowRange", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "DropRowRange", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._DropRowRange._get_response( self._host, @@ -2123,7 +2659,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: r"""Call the generate consistency token method over HTTP. @@ -2135,8 +2671,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.GenerateConsistencyTokenResponse: @@ -2148,6 +2686,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGenerateConsistencyToken._get_http_options() ) + request, metadata = self._interceptor.pre_generate_consistency_token( request, metadata ) @@ -2164,6 +2703,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GenerateConsistencyToken", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GenerateConsistencyToken", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._GenerateConsistencyToken._get_response( @@ -2187,7 +2753,33 @@ def __call__( pb_resp = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_generate_consistency_token(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.generate_consistency_token", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GenerateConsistencyToken", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetAuthorizedView( @@ -2225,7 +2817,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.AuthorizedView: r"""Call the get authorized view method over HTTP. @@ -2236,8 +2828,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.AuthorizedView: @@ -2253,6 +2847,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetAuthorizedView._get_http_options() ) + request, metadata = self._interceptor.pre_get_authorized_view( request, metadata ) @@ -2265,6 +2860,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetAuthorizedView._get_response( self._host, @@ -2285,7 +2907,29 @@ def __call__( pb_resp = table.AuthorizedView.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_authorized_view(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.AuthorizedView.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_authorized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetAuthorizedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetBackup( @@ -2322,7 +2966,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Call the get backup method over HTTP. @@ -2333,8 +2977,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Backup: @@ -2344,6 +2990,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_http_options() ) + request, metadata = self._interceptor.pre_get_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetBackup._get_transcoded_request( http_options, request @@ -2354,6 +3001,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetBackup._get_response( self._host, @@ -2374,7 +3048,29 @@ def __call__( pb_resp = table.Backup.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_backup(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Backup.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetIamPolicy( @@ -2413,7 +3109,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the get iam policy method over HTTP. @@ -2423,8 +3119,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.policy_pb2.Policy: @@ -2509,6 +3207,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetIamPolicy._get_transcoded_request( http_options, request @@ -2523,6 +3222,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetIamPolicy._get_response( self._host, @@ -2544,7 +3270,29 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetSnapshot( @@ -2582,7 +3330,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Snapshot: r"""Call the get snapshot method over HTTP. @@ -2600,8 +3348,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Snapshot: @@ -2624,6 +3374,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_http_options() ) + request, metadata = self._interceptor.pre_get_snapshot(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetSnapshot._get_transcoded_request( http_options, request @@ -2634,6 +3385,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetSnapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSnapshot", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetSnapshot._get_response( self._host, @@ -2654,7 +3432,29 @@ def __call__( pb_resp = table.Snapshot.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_snapshot(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Snapshot.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_snapshot", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetSnapshot", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _GetTable( @@ -2691,7 +3491,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Call the get table method over HTTP. @@ -2702,8 +3502,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Table: @@ -2717,6 +3519,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseGetTable._get_http_options() ) + request, metadata = self._interceptor.pre_get_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseGetTable._get_transcoded_request( http_options, request @@ -2727,6 +3530,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.GetTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._GetTable._get_response( self._host, @@ -2747,7 +3577,29 @@ def __call__( pb_resp = table.Table.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_get_table(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.get_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "GetTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListAuthorizedViews( @@ -2785,7 +3637,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListAuthorizedViewsResponse: r"""Call the list authorized views method over HTTP. @@ -2796,8 +3648,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.ListAuthorizedViewsResponse: @@ -2809,6 +3663,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseListAuthorizedViews._get_http_options() ) + request, metadata = self._interceptor.pre_list_authorized_views( request, metadata ) @@ -2821,6 +3676,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListAuthorizedViews", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListAuthorizedViews", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._ListAuthorizedViews._get_response( @@ -2843,7 +3725,33 @@ def __call__( pb_resp = bigtable_table_admin.ListAuthorizedViewsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_authorized_views(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.ListAuthorizedViewsResponse.to_json( + response + ) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_authorized_views", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListAuthorizedViews", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListBackups( @@ -2881,7 +3789,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListBackupsResponse: r"""Call the list backups method over HTTP. @@ -2892,8 +3800,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.ListBackupsResponse: @@ -2905,6 +3815,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseListBackups._get_http_options() ) + request, metadata = self._interceptor.pre_list_backups(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListBackups._get_transcoded_request( http_options, request @@ -2915,6 +3826,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListBackups", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListBackups", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._ListBackups._get_response( self._host, @@ -2935,7 +3873,31 @@ def __call__( pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_backups(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable_table_admin.ListBackupsResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_backups", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListBackups", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListSnapshots( @@ -2973,7 +3935,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListSnapshotsResponse: r"""Call the list snapshots method over HTTP. @@ -2991,8 +3953,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.ListSnapshotsResponse: @@ -3011,6 +3975,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_http_options() ) + request, metadata = self._interceptor.pre_list_snapshots(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListSnapshots._get_transcoded_request( http_options, request @@ -3021,6 +3986,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListSnapshots", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListSnapshots", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._ListSnapshots._get_response( self._host, @@ -3041,7 +4033,31 @@ def __call__( pb_resp = bigtable_table_admin.ListSnapshotsResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_snapshots(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = ( + bigtable_table_admin.ListSnapshotsResponse.to_json(response) + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_snapshots", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListSnapshots", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ListTables( @@ -3078,7 +4094,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable_table_admin.ListTablesResponse: r"""Call the list tables method over HTTP. @@ -3089,8 +4105,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable_table_admin.ListTablesResponse: @@ -3102,6 +4120,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseListTables._get_http_options() ) + request, metadata = self._interceptor.pre_list_tables(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseListTables._get_transcoded_request( http_options, request @@ -3112,6 +4131,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ListTables", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListTables", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._ListTables._get_response( self._host, @@ -3132,7 +4178,31 @@ def __call__( pb_resp = bigtable_table_admin.ListTablesResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_list_tables(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable_table_admin.ListTablesResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.list_tables", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ListTables", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ModifyColumnFamilies( @@ -3171,7 +4241,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Table: r"""Call the modify column families method over HTTP. @@ -3182,8 +4252,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Table: @@ -3197,6 +4269,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseModifyColumnFamilies._get_http_options() ) + request, metadata = self._interceptor.pre_modify_column_families( request, metadata ) @@ -3213,6 +4286,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.ModifyColumnFamilies", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ModifyColumnFamilies", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._ModifyColumnFamilies._get_response( @@ -3236,7 +4336,29 @@ def __call__( pb_resp = table.Table.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_modify_column_families(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Table.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.modify_column_families", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "ModifyColumnFamilies", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _RestoreTable( @@ -3275,7 +4397,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the restore table method over HTTP. @@ -3286,8 +4408,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -3300,6 +4424,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_http_options() ) + request, metadata = self._interceptor.pre_restore_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseRestoreTable._get_transcoded_request( http_options, request @@ -3314,6 +4439,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.RestoreTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "RestoreTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._RestoreTable._get_response( self._host, @@ -3333,7 +4485,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_restore_table(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.restore_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "RestoreTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _SetIamPolicy( @@ -3372,7 +4546,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> policy_pb2.Policy: r"""Call the set iam policy method over HTTP. @@ -3382,8 +4556,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
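These request/response records are emitted only when api-core's `client_logging` import succeeds and the module logger is enabled for DEBUG. Because each `_LOGGER` is created with `logging.getLogger(__name__)`, enabling a parent logger is enough; a plain standard-library setup that satisfies the guard (handler choice is illustrative):

```python
import logging

handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)

# Parent of every module logger in this package, so the isEnabledFor(DEBUG)
# checks in the hunks above pass via the normal logging hierarchy.
admin_logger = logging.getLogger("google.cloud.bigtable_admin_v2")
admin_logger.setLevel(logging.DEBUG)
admin_logger.addHandler(handler)
```

api-core's `client_logging.initialize_logging()`, which the sync clients call in `__init__` later in this diff, offers an environment-driven alternative; see api-core for the exact variable it reads.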
Returns: ~.policy_pb2.Policy: @@ -3468,6 +4644,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_http_options() ) + request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSetIamPolicy._get_transcoded_request( http_options, request @@ -3482,6 +4659,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.SetIamPolicy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SetIamPolicy", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._SetIamPolicy._get_response( self._host, @@ -3503,7 +4707,29 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_set_iam_policy(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.set_iam_policy", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SetIamPolicy", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _SnapshotTable( @@ -3542,7 +4768,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the snapshot table method over HTTP. @@ -3560,8 +4786,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
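Each debug call above passes its structured fields through `extra`, which the standard library copies onto the `LogRecord` as attributes. An illustrative formatter that folds those attributes back into one JSON line (field names mirror the dicts above; how a consumer renders them is entirely an assumption):

```python
import json
import logging


class RpcRecordFormatter(logging.Formatter):
    """Render the structured fields attached via `extra` as a single JSON line."""

    def format(self, record):
        payload = {
            "message": record.getMessage(),
            "serviceName": getattr(record, "serviceName", None),
            "rpcName": getattr(record, "rpcName", None),
            "httpRequest": getattr(record, "httpRequest", None),
            "httpResponse": getattr(record, "httpResponse", None),
        }
        return json.dumps(payload, default=str)


# e.g. handler.setFormatter(RpcRecordFormatter()) on the handler configured earlier.
```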
Returns: ~.operations_pb2.Operation: @@ -3574,6 +4802,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_http_options() ) + request, metadata = self._interceptor.pre_snapshot_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseSnapshotTable._get_transcoded_request( http_options, request @@ -3588,6 +4817,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.SnapshotTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SnapshotTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._SnapshotTable._get_response( self._host, @@ -3607,7 +4863,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_snapshot_table(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.snapshot_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "SnapshotTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _TestIamPermissions( @@ -3646,7 +4924,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> iam_policy_pb2.TestIamPermissionsResponse: r"""Call the test iam permissions method over HTTP. @@ -3656,8 +4934,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
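The double guard used before every payload dump above (`CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor(logging.DEBUG)`) keeps serialization work off the hot path when debug logging is disabled. The same lazy pattern in isolation (the function and payload are made up for illustration):

```python
import json
import logging

_LOGGER = logging.getLogger(__name__)


def send(request: dict) -> None:
    # Only pay for json.dumps when a DEBUG record would actually be emitted.
    if _LOGGER.isEnabledFor(logging.DEBUG):
        _LOGGER.debug("sending request: %s", json.dumps(request))
    ...  # issue the request here
```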
Returns: ~.iam_policy_pb2.TestIamPermissionsResponse: @@ -3667,6 +4947,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseTestIamPermissions._get_http_options() ) + request, metadata = self._interceptor.pre_test_iam_permissions( request, metadata ) @@ -3683,6 +4964,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.TestIamPermissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "TestIamPermissions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._TestIamPermissions._get_response( @@ -3706,7 +5014,29 @@ def __call__( pb_resp = resp json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_test_iam_permissions(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.test_iam_permissions", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "TestIamPermissions", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UndeleteTable( @@ -3745,7 +5075,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the undelete table method over HTTP. @@ -3756,8 +5086,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
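The logging hunks alternate between two JSON helpers depending on the message type: proto-plus request/response classes expose a `to_json` classmethod, while plain protobuf messages (`operations_pb2.Operation`, the IAM policy types) go through `json_format.MessageToJson`. A small sketch of both paths (resource and operation names are placeholders):

```python
from google.longrunning import operations_pb2
from google.protobuf import json_format

from google.cloud.bigtable_admin_v2.types import bigtable_table_admin

# proto-plus path: generated request/response types carry a to_json classmethod.
req = bigtable_table_admin.ListTablesRequest(parent="projects/p/instances/i")
print(type(req).to_json(req))

# protobuf path: raw protobuf messages are serialized with json_format.
op = operations_pb2.Operation(name="operations/123", done=False)
print(json_format.MessageToJson(op))
```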
Returns: ~.operations_pb2.Operation: @@ -3770,6 +5102,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_http_options() ) + request, metadata = self._interceptor.pre_undelete_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUndeleteTable._get_transcoded_request( http_options, request @@ -3784,6 +5117,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UndeleteTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UndeleteTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._UndeleteTable._get_response( self._host, @@ -3803,7 +5163,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_undelete_table(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.undelete_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UndeleteTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateAuthorizedView( @@ -3842,7 +5224,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update authorized view method over HTTP. @@ -3853,8 +5235,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -3867,6 +5251,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseUpdateAuthorizedView._get_http_options() ) + request, metadata = self._interceptor.pre_update_authorized_view( request, metadata ) @@ -3883,6 +5268,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateAuthorizedView", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateAuthorizedView", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = ( BigtableTableAdminRestTransport._UpdateAuthorizedView._get_response( @@ -3904,7 +5316,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_authorized_view(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_authorized_view", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateAuthorizedView", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateBackup( @@ -3943,7 +5377,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> table.Backup: r"""Call the update backup method over HTTP. @@ -3954,8 +5388,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.table.Backup: @@ -3965,6 +5401,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_http_options() ) + request, metadata = self._interceptor.pre_update_backup(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateBackup._get_transcoded_request( http_options, request @@ -3979,6 +5416,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateBackup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateBackup", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._UpdateBackup._get_response( self._host, @@ -4000,7 +5464,29 @@ def __call__( pb_resp = table.Backup.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_backup(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = table.Backup.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_backup", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateBackup", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _UpdateTable( @@ -4039,7 +5525,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> operations_pb2.Operation: r"""Call the update table method over HTTP. @@ -4050,8 +5536,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.operations_pb2.Operation: @@ -4064,6 +5552,7 @@ def __call__( http_options = ( _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_http_options() ) + request, metadata = self._interceptor.pre_update_table(request, metadata) transcoded_request = _BaseBigtableTableAdminRestTransport._BaseUpdateTable._get_transcoded_request( http_options, request @@ -4078,6 +5567,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = json_format.MessageToJson(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable.admin_v2.BigtableTableAdminClient.UpdateTable", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateTable", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableTableAdminRestTransport._UpdateTable._get_response( self._host, @@ -4097,7 +5613,29 @@ def __call__( # Return the response resp = operations_pb2.Operation() json_format.Parse(response.content, resp, ignore_unknown_fields=True) + resp = self._interceptor.post_update_table(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = json_format.MessageToJson(resp) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable.admin_v2.BigtableTableAdminClient.update_table", + extra={ + "serviceName": "google.bigtable.admin.v2.BigtableTableAdmin", + "rpcName": "UpdateTable", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp @property diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index b36f525fa..3ea13cdba 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -13,6 +13,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import logging as std_logging from collections import OrderedDict import re from typing import ( @@ -52,6 +53,15 @@ from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport from .client import BigtableClient +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + class BigtableAsyncClient: """Service for reading from and writing to existing Bigtable @@ -255,6 +265,28 @@ def __init__( client_info=client_info, ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable_v2.BigtableAsyncClient`.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "universeDomain": getattr( + self._client._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._client._transport._credentials).__module__}.{type(self._client._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._client._transport, "_credentials") + else { + "serviceName": "google.bigtable.v2.Bigtable", + "credentialsType": None, + }, + ) + def read_rows( self, request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, @@ -263,7 +295,7 @@ def read_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]: r"""Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to @@ -298,8 +330,10 @@ def read_rows( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: @@ -357,9 +391,9 @@ def read_rows( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -383,7 +417,7 @@ def sample_row_keys( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]: r"""Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of @@ -417,8 +451,10 @@ def sample_row_keys( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. 
timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: @@ -476,9 +512,9 @@ def sample_row_keys( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -504,7 +540,7 @@ async def mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. @@ -553,8 +589,10 @@ async def mutate_row( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.MutateRowResponse: @@ -616,9 +654,9 @@ async def mutate_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -643,7 +681,7 @@ def mutate_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: r"""Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire @@ -689,8 +727,10 @@ def mutate_rows( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
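For context on the routing-header hunks in this async client: `to_grpc_metadata(header_params)` yields the single `x-goog-request-params` entry that Bigtable uses for request routing, and this change appends it unconditionally instead of first checking whether the key is already present. A rough sketch of what gets appended (the exact URL-encoding is handled by api-core):

```python
from google.api_core import gapic_v1

header_params = {
    "table_name": "projects/my-project/instances/my-instance/tables/my-table",
    "app_profile_id": "my-profile",
}
routing_md = gapic_v1.routing_header.to_grpc_metadata(header_params)
# Roughly: ("x-goog-request-params",
#           "table_name=projects%2Fmy-project%2F...%2Fmy-table&app_profile_id=my-profile")
metadata = (("x-goog-custom-note", "example"),) + (routing_md,)
```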
Returns: AsyncIterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: @@ -750,9 +790,9 @@ def mutate_rows( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -780,7 +820,7 @@ async def check_and_mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a predicate Reader filter. @@ -851,8 +891,10 @@ async def check_and_mutate_row( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: @@ -927,9 +969,9 @@ async def check_and_mutate_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -953,7 +995,7 @@ async def ping_and_warm( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this connection. This call is not required but may be useful @@ -983,8 +1025,10 @@ async def ping_and_warm( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.PingAndWarmResponse: @@ -1032,9 +1076,9 @@ async def ping_and_warm( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. 
self._client._validate_universe_domain() @@ -1060,7 +1104,7 @@ async def read_modify_write_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the @@ -1115,8 +1159,10 @@ async def read_modify_write_row( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: @@ -1178,9 +1224,9 @@ async def read_modify_write_row( ) if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() @@ -1206,7 +1252,7 @@ def generate_initial_change_stream_partitions( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[ AsyncIterable[bigtable.GenerateInitialChangeStreamPartitionsResponse] ]: @@ -1243,8 +1289,10 @@ def generate_initial_change_stream_partitions( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: @@ -1314,7 +1362,7 @@ def read_change_stream( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. Reads changes from a table's change stream. @@ -1348,8 +1396,10 @@ def read_change_stream( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: @@ -1417,7 +1467,7 @@ def execute_query( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Awaitable[AsyncIterable[bigtable.ExecuteQueryResponse]]: r"""Executes a BTQL query against a particular Cloud Bigtable instance. @@ -1450,8 +1500,10 @@ def execute_query( retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: AsyncIterable[google.cloud.bigtable_v2.types.ExecuteQueryResponse]: @@ -1500,9 +1552,9 @@ def execute_query( header_params["app_profile_id"] = request.app_profile_id if header_params: - metadata = tuple(metadata) - if all(m[0] != gapic_v1.routing_header.ROUTING_METADATA_KEY for m in metadata): - metadata += (gapic_v1.routing_header.to_grpc_metadata(header_params),) + metadata = tuple(metadata) + ( + gapic_v1.routing_header.to_grpc_metadata(header_params), + ) # Validate the universe domain. self._client._validate_universe_domain() diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index a2534d539..76f7dade0 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -14,6 +14,7 @@ # limitations under the License. # from collections import OrderedDict +import logging as std_logging import os import re from typing import ( @@ -49,6 +50,15 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data from google.cloud.bigtable_v2.types import request_stats @@ -620,6 +630,10 @@ def __init__( # Initialize the universe domain validation. self._is_universe_domain_valid = False + if CLIENT_LOGGING_SUPPORTED: # pragma: NO COVER + # Setup logging. 
+ client_logging.initialize_logging() + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( @@ -682,6 +696,29 @@ def __init__( api_audience=self._client_options.api_audience, ) + if "async" not in str(self._transport): + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ): # pragma: NO COVER + _LOGGER.debug( + "Created client `google.bigtable_v2.BigtableClient`.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "universeDomain": getattr( + self._transport._credentials, "universe_domain", "" + ), + "credentialsType": f"{type(self._transport._credentials).__module__}.{type(self._transport._credentials).__qualname__}", + "credentialsInfo": getattr( + self.transport._credentials, "get_cred_info", lambda: None + )(), + } + if hasattr(self._transport, "_credentials") + else { + "serviceName": "google.bigtable.v2.Bigtable", + "credentialsType": None, + }, + ) + def read_rows( self, request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, @@ -690,7 +727,7 @@ def read_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.ReadRowsResponse]: r"""Streams back the contents of all requested rows in key order, optionally applying the same Reader filter to @@ -725,8 +762,10 @@ def read_rows( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: @@ -807,7 +846,7 @@ def sample_row_keys( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.SampleRowKeysResponse]: r"""Returns a sample of row keys in the table. The returned row keys will delimit contiguous sections of @@ -841,8 +880,10 @@ def sample_row_keys( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: @@ -925,7 +966,7 @@ def mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.MutateRowResponse: r"""Mutates a row atomically. 
Cells already present in the row are left unchanged unless explicitly changed by ``mutation``. @@ -974,8 +1015,10 @@ def mutate_row( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.MutateRowResponse: @@ -1061,7 +1104,7 @@ def mutate_rows( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.MutateRowsResponse]: r"""Mutates multiple rows in a batch. Each individual row is mutated atomically as in MutateRow, but the entire @@ -1107,8 +1150,10 @@ def mutate_rows( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: @@ -1195,7 +1240,7 @@ def check_and_mutate_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Mutates a row atomically based on the output of a predicate Reader filter. @@ -1266,8 +1311,10 @@ def check_and_mutate_row( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: @@ -1365,7 +1412,7 @@ def ping_and_warm( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.PingAndWarmResponse: r"""Warm up associated instance metadata for this connection. This call is not required but may be useful @@ -1395,8 +1442,10 @@ def ping_and_warm( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.PingAndWarmResponse: @@ -1469,7 +1518,7 @@ def read_modify_write_row( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Modifies a row atomically on the server. The method reads the latest existing timestamp and value from the @@ -1524,8 +1573,10 @@ def read_modify_write_row( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: @@ -1612,7 +1663,7 @@ def generate_initial_change_stream_partitions( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]: r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. Returns the current list of partitions that make up the table's @@ -1647,8 +1698,10 @@ def generate_initial_change_stream_partitions( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: @@ -1717,7 +1770,7 @@ def read_change_stream( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.ReadChangeStreamResponse]: r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. Reads changes from a table's change stream. @@ -1751,8 +1804,10 @@ def read_change_stream( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
+ metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: @@ -1817,7 +1872,7 @@ def execute_query( app_profile_id: Optional[str] = None, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> Iterable[bigtable.ExecuteQueryResponse]: r"""Executes a BTQL query against a particular Cloud Bigtable instance. @@ -1850,8 +1905,10 @@ def execute_query( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. Returns: Iterable[google.cloud.bigtable_v2.types.ExecuteQueryResponse]: diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index febdd441d..08be938c2 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -13,6 +13,9 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import json +import logging as std_logging +import pickle import warnings from typing import Callable, Dict, Optional, Sequence, Tuple, Union @@ -21,12 +24,90 @@ import google.auth # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from google.cloud.bigtable_v2.types import bigtable from .base import BigtableTransport, DEFAULT_CLIENT_INFO +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientInterceptor(grpc.UnaryUnaryClientInterceptor): # pragma: NO COVER + def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": client_call_details.method, + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + + response = continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = response.result() + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response for {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": client_call_details.method, + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableGrpcTransport(BigtableTransport): """gRPC backend transport for Bigtable. @@ -181,7 +262,12 @@ def __init__( ], ) - # Wrap messages. This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientInterceptor() + self._logged_channel = grpc.intercept_channel( + self._grpc_channel, self._interceptor + ) + + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @classmethod @@ -260,7 +346,7 @@ def read_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "read_rows" not in self._stubs: - self._stubs["read_rows"] = self.grpc_channel.unary_stream( + self._stubs["read_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadRows", request_serializer=bigtable.ReadRowsRequest.serialize, response_deserializer=bigtable.ReadRowsResponse.deserialize, @@ -290,7 +376,7 @@ def sample_row_keys( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "sample_row_keys" not in self._stubs: - self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + self._stubs["sample_row_keys"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/SampleRowKeys", request_serializer=bigtable.SampleRowKeysRequest.serialize, response_deserializer=bigtable.SampleRowKeysResponse.deserialize, @@ -317,7 +403,7 @@ def mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_row" not in self._stubs: - self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/MutateRow", request_serializer=bigtable.MutateRowRequest.serialize, response_deserializer=bigtable.MutateRowResponse.deserialize, @@ -345,7 +431,7 @@ def mutate_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_rows" not in self._stubs: - self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + self._stubs["mutate_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/MutateRows", request_serializer=bigtable.MutateRowsRequest.serialize, response_deserializer=bigtable.MutateRowsResponse.deserialize, @@ -374,7 +460,7 @@ def check_and_mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "check_and_mutate_row" not in self._stubs: - self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["check_and_mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/CheckAndMutateRow", request_serializer=bigtable.CheckAndMutateRowRequest.serialize, response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, @@ -402,7 +488,7 @@ def ping_and_warm( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "ping_and_warm" not in self._stubs: - self._stubs["ping_and_warm"] = self.grpc_channel.unary_unary( + self._stubs["ping_and_warm"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/PingAndWarm", request_serializer=bigtable.PingAndWarmRequest.serialize, response_deserializer=bigtable.PingAndWarmResponse.deserialize, @@ -436,7 +522,7 @@ def read_modify_write_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
if "read_modify_write_row" not in self._stubs: - self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + self._stubs["read_modify_write_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, @@ -471,7 +557,7 @@ def generate_initial_change_stream_partitions( if "generate_initial_change_stream_partitions" not in self._stubs: self._stubs[ "generate_initial_change_stream_partitions" - ] = self.grpc_channel.unary_stream( + ] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, @@ -502,7 +588,7 @@ def read_change_stream( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_change_stream" not in self._stubs: - self._stubs["read_change_stream"] = self.grpc_channel.unary_stream( + self._stubs["read_change_stream"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadChangeStream", request_serializer=bigtable.ReadChangeStreamRequest.serialize, response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, @@ -529,7 +615,7 @@ def execute_query( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "execute_query" not in self._stubs: - self._stubs["execute_query"] = self.grpc_channel.unary_stream( + self._stubs["execute_query"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ExecuteQuery", request_serializer=bigtable.ExecuteQueryRequest.serialize, response_deserializer=bigtable.ExecuteQueryResponse.deserialize, @@ -537,7 +623,7 @@ def execute_query( return self._stubs["execute_query"] def close(self): - self.grpc_channel.close() + self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 6f6e1fe85..ee9b14bde 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -14,6 +14,9 @@ # limitations under the License. 
# import inspect +import json +import pickle +import logging as std_logging import warnings from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union @@ -23,14 +26,93 @@ from google.api_core import retry_async as retries from google.auth import credentials as ga_credentials # type: ignore from google.auth.transport.grpc import SslCredentials # type: ignore +from google.protobuf.json_format import MessageToJson +import google.protobuf.message import grpc # type: ignore +import proto # type: ignore from grpc.experimental import aio # type: ignore from google.cloud.bigtable_v2.types import bigtable from .base import BigtableTransport, DEFAULT_CLIENT_INFO from .grpc import BigtableGrpcTransport +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = std_logging.getLogger(__name__) + + +class _LoggingClientAIOInterceptor( + grpc.aio.UnaryUnaryClientInterceptor +): # pragma: NO COVER + async def intercept_unary_unary(self, continuation, client_call_details, request): + logging_enabled = CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + std_logging.DEBUG + ) + if logging_enabled: # pragma: NO COVER + request_metadata = client_call_details.metadata + if isinstance(request, proto.Message): + request_payload = type(request).to_json(request) + elif isinstance(request, google.protobuf.message.Message): + request_payload = MessageToJson(request) + else: + request_payload = f"{type(request).__name__}: {pickle.dumps(request)}" + + request_metadata = { + key: value.decode("utf-8") if isinstance(value, bytes) else value + for key, value in request_metadata + } + grpc_request = { + "payload": request_payload, + "requestMethod": "grpc", + "metadata": dict(request_metadata), + } + _LOGGER.debug( + f"Sending request for {client_call_details.method}", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": str(client_call_details.method), + "request": grpc_request, + "metadata": grpc_request["metadata"], + }, + ) + response = await continuation(client_call_details, request) + if logging_enabled: # pragma: NO COVER + response_metadata = await response.trailing_metadata() + # Convert gRPC metadata `` to list of tuples + metadata = ( + dict([(k, str(v)) for k, v in response_metadata]) + if response_metadata + else None + ) + result = await response + if isinstance(result, proto.Message): + response_payload = type(result).to_json(result) + elif isinstance(result, google.protobuf.message.Message): + response_payload = MessageToJson(result) + else: + response_payload = f"{type(result).__name__}: {pickle.dumps(result)}" + grpc_response = { + "payload": response_payload, + "metadata": metadata, + "status": "OK", + } + _LOGGER.debug( + f"Received response to rpc {client_call_details.method}.", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": str(client_call_details.method), + "response": grpc_response, + "metadata": grpc_response["metadata"], + }, + ) + return response + class BigtableGrpcAsyncIOTransport(BigtableTransport): """gRPC AsyncIO backend transport for Bigtable. @@ -228,10 +310,13 @@ def __init__( ], ) - # Wrap messages. 
This must be done after self._grpc_channel exists + self._interceptor = _LoggingClientAIOInterceptor() + self._grpc_channel._unary_unary_interceptors.append(self._interceptor) + self._logged_channel = self._grpc_channel self._wrap_with_kind = ( "kind" in inspect.signature(gapic_v1.method_async.wrap_method).parameters ) + # Wrap messages. This must be done after self._logged_channel exists self._prep_wrapped_messages(client_info) @property @@ -268,7 +353,7 @@ def read_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_rows" not in self._stubs: - self._stubs["read_rows"] = self.grpc_channel.unary_stream( + self._stubs["read_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadRows", request_serializer=bigtable.ReadRowsRequest.serialize, response_deserializer=bigtable.ReadRowsResponse.deserialize, @@ -300,7 +385,7 @@ def sample_row_keys( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "sample_row_keys" not in self._stubs: - self._stubs["sample_row_keys"] = self.grpc_channel.unary_stream( + self._stubs["sample_row_keys"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/SampleRowKeys", request_serializer=bigtable.SampleRowKeysRequest.serialize, response_deserializer=bigtable.SampleRowKeysResponse.deserialize, @@ -327,7 +412,7 @@ def mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_row" not in self._stubs: - self._stubs["mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/MutateRow", request_serializer=bigtable.MutateRowRequest.serialize, response_deserializer=bigtable.MutateRowResponse.deserialize, @@ -355,7 +440,7 @@ def mutate_rows( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "mutate_rows" not in self._stubs: - self._stubs["mutate_rows"] = self.grpc_channel.unary_stream( + self._stubs["mutate_rows"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/MutateRows", request_serializer=bigtable.MutateRowsRequest.serialize, response_deserializer=bigtable.MutateRowsResponse.deserialize, @@ -385,7 +470,7 @@ def check_and_mutate_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "check_and_mutate_row" not in self._stubs: - self._stubs["check_and_mutate_row"] = self.grpc_channel.unary_unary( + self._stubs["check_and_mutate_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/CheckAndMutateRow", request_serializer=bigtable.CheckAndMutateRowRequest.serialize, response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, @@ -415,7 +500,7 @@ def ping_and_warm( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "ping_and_warm" not in self._stubs: - self._stubs["ping_and_warm"] = self.grpc_channel.unary_unary( + self._stubs["ping_and_warm"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/PingAndWarm", request_serializer=bigtable.PingAndWarmRequest.serialize, response_deserializer=bigtable.PingAndWarmResponse.deserialize, @@ -450,7 +535,7 @@ def read_modify_write_row( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. 
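Unlike the synchronous transport, the asyncio transport cannot call grpc.intercept_channel, so it appends the interceptor to the already-created channel's interceptor list and reuses that channel as _logged_channel. The public way to get the same effect, when the channel has not been created yet, is to pass interceptors at construction time; a sketch with an invented no-op interceptor and target:

    import grpc

    class NoopAioInterceptor(grpc.aio.UnaryUnaryClientInterceptor):
        async def intercept_unary_unary(self, continuation, client_call_details, request):
            # Hand the in-flight call straight back; awaiting it yields the response.
            return await continuation(client_call_details, request)

    # grpc.aio channels take interceptors up front; "localhost:8086" is illustrative.
    channel = grpc.aio.insecure_channel(
        "localhost:8086", interceptors=[NoopAioInterceptor()]
    )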
if "read_modify_write_row" not in self._stubs: - self._stubs["read_modify_write_row"] = self.grpc_channel.unary_unary( + self._stubs["read_modify_write_row"] = self._logged_channel.unary_unary( "/google.bigtable.v2.Bigtable/ReadModifyWriteRow", request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, @@ -485,7 +570,7 @@ def generate_initial_change_stream_partitions( if "generate_initial_change_stream_partitions" not in self._stubs: self._stubs[ "generate_initial_change_stream_partitions" - ] = self.grpc_channel.unary_stream( + ] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions", request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, @@ -516,7 +601,7 @@ def read_change_stream( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "read_change_stream" not in self._stubs: - self._stubs["read_change_stream"] = self.grpc_channel.unary_stream( + self._stubs["read_change_stream"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ReadChangeStream", request_serializer=bigtable.ReadChangeStreamRequest.serialize, response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, @@ -545,7 +630,7 @@ def execute_query( # gRPC handles serialization and deserialization, so we just need # to pass in the functions for each. if "execute_query" not in self._stubs: - self._stubs["execute_query"] = self.grpc_channel.unary_stream( + self._stubs["execute_query"] = self._logged_channel.unary_stream( "/google.bigtable.v2.Bigtable/ExecuteQuery", request_serializer=bigtable.ExecuteQueryRequest.serialize, response_deserializer=bigtable.ExecuteQueryResponse.deserialize, @@ -623,7 +708,7 @@ def _wrap_method(self, func, *args, **kwargs): return gapic_v1.method_async.wrap_method(func, *args, **kwargs) def close(self): - return self.grpc_channel.close() + return self._logged_channel.close() @property def kind(self) -> str: diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 221b04b8a..d1ce6cadd 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -13,9 +13,10 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# +import logging +import json # type: ignore from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore from google.auth import credentials as ga_credentials # type: ignore from google.api_core import exceptions as core_exceptions from google.api_core import retry as retries @@ -42,6 +43,14 @@ except AttributeError: # pragma: NO COVER OptionalRetry = Union[retries.Retry, object, None] # type: ignore +try: + from google.api_core import client_logging # type: ignore + + CLIENT_LOGGING_SUPPORTED = True # pragma: NO COVER +except ImportError: # pragma: NO COVER + CLIENT_LOGGING_SUPPORTED = False + +_LOGGER = logging.getLogger(__name__) DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, @@ -154,8 +163,10 @@ def post_sample_row_keys(self, response): def pre_check_and_mutate_row( self, request: bigtable.CheckAndMutateRowRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.CheckAndMutateRowRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.CheckAndMutateRowRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for check_and_mutate_row Override in a subclass to manipulate the request or metadata @@ -175,8 +186,10 @@ def post_check_and_mutate_row( return response def pre_execute_query( - self, request: bigtable.ExecuteQueryRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.ExecuteQueryRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.ExecuteQueryRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.ExecuteQueryRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for execute_query Override in a subclass to manipulate the request or metadata @@ -198,9 +211,10 @@ def post_execute_query( def pre_generate_initial_change_stream_partitions( self, request: bigtable.GenerateInitialChangeStreamPartitionsRequest, - metadata: Sequence[Tuple[str, str]], + metadata: Sequence[Tuple[str, Union[str, bytes]]], ) -> Tuple[ - bigtable.GenerateInitialChangeStreamPartitionsRequest, Sequence[Tuple[str, str]] + bigtable.GenerateInitialChangeStreamPartitionsRequest, + Sequence[Tuple[str, Union[str, bytes]]], ]: """Pre-rpc interceptor for generate_initial_change_stream_partitions @@ -221,8 +235,10 @@ def post_generate_initial_change_stream_partitions( return response def pre_mutate_row( - self, request: bigtable.MutateRowRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.MutateRowRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.MutateRowRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.MutateRowRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for mutate_row Override in a subclass to manipulate the request or metadata @@ -242,8 +258,10 @@ def post_mutate_row( return response def pre_mutate_rows( - self, request: bigtable.MutateRowsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.MutateRowsRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.MutateRowsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.MutateRowsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for mutate_rows Override in a subclass to manipulate the request or metadata @@ -263,8 +281,10 @@ def post_mutate_rows( return response def pre_ping_and_warm( - self, request: bigtable.PingAndWarmRequest, 
metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.PingAndWarmRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.PingAndWarmRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.PingAndWarmRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for ping_and_warm Override in a subclass to manipulate the request or metadata @@ -286,8 +306,10 @@ def post_ping_and_warm( def pre_read_change_stream( self, request: bigtable.ReadChangeStreamRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.ReadChangeStreamRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.ReadChangeStreamRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for read_change_stream Override in a subclass to manipulate the request or metadata @@ -309,8 +331,10 @@ def post_read_change_stream( def pre_read_modify_write_row( self, request: bigtable.ReadModifyWriteRowRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.ReadModifyWriteRowRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[ + bigtable.ReadModifyWriteRowRequest, Sequence[Tuple[str, Union[str, bytes]]] + ]: """Pre-rpc interceptor for read_modify_write_row Override in a subclass to manipulate the request or metadata @@ -330,8 +354,10 @@ def post_read_modify_write_row( return response def pre_read_rows( - self, request: bigtable.ReadRowsRequest, metadata: Sequence[Tuple[str, str]] - ) -> Tuple[bigtable.ReadRowsRequest, Sequence[Tuple[str, str]]]: + self, + request: bigtable.ReadRowsRequest, + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.ReadRowsRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for read_rows Override in a subclass to manipulate the request or metadata @@ -353,8 +379,8 @@ def post_read_rows( def pre_sample_row_keys( self, request: bigtable.SampleRowKeysRequest, - metadata: Sequence[Tuple[str, str]], - ) -> Tuple[bigtable.SampleRowKeysRequest, Sequence[Tuple[str, str]]]: + metadata: Sequence[Tuple[str, Union[str, bytes]]], + ) -> Tuple[bigtable.SampleRowKeysRequest, Sequence[Tuple[str, Union[str, bytes]]]]: """Pre-rpc interceptor for sample_row_keys Override in a subclass to manipulate the request or metadata @@ -496,7 +522,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.CheckAndMutateRowResponse: r"""Call the check and mutate row method over HTTP. @@ -507,8 +533,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
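The widened metadata annotation in these signatures mirrors gRPC's own convention: ordinary keys carry str values, and only keys ending in -bin may carry bytes (which gRPC base64-encodes on the wire). A hypothetical call sketch; both metadata key names are invented and the client assumes Application Default Credentials:

    from google.cloud import bigtable_v2

    client = bigtable_v2.BigtableClient()

    response = client.ping_and_warm(
        request={"name": "projects/my-project/instances/my-instance"},
        metadata=[
            ("x-example-trace", "abc123"),         # plain str value
            ("x-example-token-bin", b"\x00\x01"),  # bytes, allowed by the -bin suffix
        ],
    )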
Returns: ~.bigtable.CheckAndMutateRowResponse: @@ -520,6 +548,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseCheckAndMutateRow._get_http_options() ) + request, metadata = self._interceptor.pre_check_and_mutate_row( request, metadata ) @@ -536,6 +565,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.CheckAndMutateRow", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "CheckAndMutateRow", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._CheckAndMutateRow._get_response( self._host, @@ -557,7 +613,31 @@ def __call__( pb_resp = bigtable.CheckAndMutateRowResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_check_and_mutate_row(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.CheckAndMutateRowResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.check_and_mutate_row", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "CheckAndMutateRow", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ExecuteQuery(_BaseBigtableRestTransport._BaseExecuteQuery, BigtableRestStub): @@ -594,7 +674,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the execute query method over HTTP. @@ -605,8 +685,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
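All of the REST debug records in this file pass their structured fields (serviceName, rpcName, httpRequest, httpResponse) through the extra mapping, so they become attributes on the LogRecord rather than part of the message text. A small formatter sketch that surfaces them again, meant to be attached to a handler like the one configured earlier; the attribute names are the extra keys used above, everything else is illustrative:

    import json
    import logging

    class StructuredFormatter(logging.Formatter):
        FIELDS = ("serviceName", "rpcName", "httpRequest", "httpResponse")

        def format(self, record):
            base = super().format(record)
            details = {f: getattr(record, f) for f in self.FIELDS if hasattr(record, f)}
            return f"{base} {json.dumps(details, default=str)}" if details else base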
Returns: ~.bigtable.ExecuteQueryResponse: @@ -618,6 +700,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseExecuteQuery._get_http_options() ) + request, metadata = self._interceptor.pre_execute_query(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseExecuteQuery._get_transcoded_request( @@ -636,6 +719,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ExecuteQuery", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ExecuteQuery", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._ExecuteQuery._get_response( self._host, @@ -656,6 +766,7 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.ExecuteQueryResponse ) + resp = self._interceptor.post_execute_query(resp) return resp @@ -696,7 +807,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the generate initial change stream partitions method over HTTP. @@ -710,8 +821,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
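The pre_*/post_* calls threaded through each __call__ come from the transport's REST interceptor base class (BigtableRestInterceptor in the generated module), which applications can subclass to adjust requests, responses, or metadata around every REST RPC. A sketch of a subclass that appends one extra header to ReadRows requests; the header name is invented, and wiring it in this way assumes Application Default Credentials and the generated transport's interceptor keyword:

    from google.cloud.bigtable_v2 import BigtableClient
    from google.cloud.bigtable_v2.services.bigtable.transports.rest import (
        BigtableRestInterceptor,
        BigtableRestTransport,
    )

    class TaggingInterceptor(BigtableRestInterceptor):
        def pre_read_rows(self, request, metadata):
            # Leave the request untouched and add one (made-up) diagnostic header.
            return request, list(metadata) + [("x-example-origin", "batch-job")]

    transport = BigtableRestTransport(interceptor=TaggingInterceptor())
    client = BigtableClient(transport=transport)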
Returns: ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: @@ -725,6 +838,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseGenerateInitialChangeStreamPartitions._get_http_options() ) + ( request, metadata, @@ -744,6 +858,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.GenerateInitialChangeStreamPartitions", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "GenerateInitialChangeStreamPartitions", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._GenerateInitialChangeStreamPartitions._get_response( self._host, @@ -764,6 +905,7 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.GenerateInitialChangeStreamPartitionsResponse ) + resp = self._interceptor.post_generate_initial_change_stream_partitions( resp ) @@ -802,7 +944,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.MutateRowResponse: r"""Call the mutate row method over HTTP. @@ -813,8 +955,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.MutateRowResponse: @@ -824,6 +968,7 @@ def __call__( """ http_options = _BaseBigtableRestTransport._BaseMutateRow._get_http_options() + request, metadata = self._interceptor.pre_mutate_row(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseMutateRow._get_transcoded_request( @@ -842,6 +987,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.MutateRow", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRow", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._MutateRow._get_response( self._host, @@ -863,7 +1035,29 @@ def __call__( pb_resp = bigtable.MutateRowResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_mutate_row(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.MutateRowResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.mutate_row", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRow", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _MutateRows(_BaseBigtableRestTransport._BaseMutateRows, BigtableRestStub): @@ -900,7 +1094,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the mutate rows method over HTTP. @@ -911,8 +1105,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.MutateRowsResponse: @@ -924,6 +1120,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseMutateRows._get_http_options() ) + request, metadata = self._interceptor.pre_mutate_rows(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseMutateRows._get_transcoded_request( @@ -942,6 +1139,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.MutateRows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "MutateRows", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._MutateRows._get_response( self._host, @@ -962,6 +1186,7 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.MutateRowsResponse ) + resp = self._interceptor.post_mutate_rows(resp) return resp @@ -998,7 +1223,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.PingAndWarmResponse: r"""Call the ping and warm method over HTTP. @@ -1009,8 +1234,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.PingAndWarmResponse: @@ -1023,6 +1250,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BasePingAndWarm._get_http_options() ) + request, metadata = self._interceptor.pre_ping_and_warm(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BasePingAndWarm._get_transcoded_request( @@ -1041,6 +1269,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.PingAndWarm", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PingAndWarm", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._PingAndWarm._get_response( self._host, @@ -1062,7 +1317,29 @@ def __call__( pb_resp = bigtable.PingAndWarmResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_ping_and_warm(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.PingAndWarmResponse.to_json(response) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.ping_and_warm", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "PingAndWarm", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ReadChangeStream( @@ -1101,7 +1378,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the read change stream method over HTTP. @@ -1113,8 +1390,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.ReadChangeStreamResponse: @@ -1127,6 +1406,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseReadChangeStream._get_http_options() ) + request, metadata = self._interceptor.pre_read_change_stream( request, metadata ) @@ -1147,6 +1427,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ReadChangeStream", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadChangeStream", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._ReadChangeStream._get_response( self._host, @@ -1167,6 +1474,7 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.ReadChangeStreamResponse ) + resp = self._interceptor.post_read_change_stream(resp) return resp @@ -1205,7 +1513,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> bigtable.ReadModifyWriteRowResponse: r"""Call the read modify write row method over HTTP. @@ -1216,8 +1524,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.ReadModifyWriteRowResponse: @@ -1229,6 +1539,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseReadModifyWriteRow._get_http_options() ) + request, metadata = self._interceptor.pre_read_modify_write_row( request, metadata ) @@ -1245,6 +1556,33 @@ def __call__( transcoded_request ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ReadModifyWriteRow", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadModifyWriteRow", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._ReadModifyWriteRow._get_response( self._host, @@ -1266,7 +1604,31 @@ def __call__( pb_resp = bigtable.ReadModifyWriteRowResponse.pb(resp) json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) + resp = self._interceptor.post_read_modify_write_row(resp) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + try: + response_payload = bigtable.ReadModifyWriteRowResponse.to_json( + response + ) + except: + response_payload = None + http_response = { + "payload": response_payload, + "headers": dict(response.headers), + "status": response.status_code, + } + _LOGGER.debug( + "Received response for google.bigtable_v2.BigtableClient.read_modify_write_row", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadModifyWriteRow", + "metadata": http_response["headers"], + "httpResponse": http_response, + }, + ) return resp class _ReadRows(_BaseBigtableRestTransport._BaseReadRows, BigtableRestStub): @@ -1303,7 +1665,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the read rows method over HTTP. @@ -1314,8 +1676,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
Returns: ~.bigtable.ReadRowsResponse: @@ -1325,6 +1689,7 @@ def __call__( """ http_options = _BaseBigtableRestTransport._BaseReadRows._get_http_options() + request, metadata = self._interceptor.pre_read_rows(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseReadRows._get_transcoded_request( @@ -1343,6 +1708,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.ReadRows", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "ReadRows", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._ReadRows._get_response( self._host, @@ -1361,6 +1753,7 @@ def __call__( # Return the response resp = rest_streaming.ResponseIterator(response, bigtable.ReadRowsResponse) + resp = self._interceptor.post_read_rows(resp) return resp @@ -1399,7 +1792,7 @@ def __call__( *, retry: OptionalRetry = gapic_v1.method.DEFAULT, timeout: Optional[float] = None, - metadata: Sequence[Tuple[str, str]] = (), + metadata: Sequence[Tuple[str, Union[str, bytes]]] = (), ) -> rest_streaming.ResponseIterator: r"""Call the sample row keys method over HTTP. @@ -1410,8 +1803,10 @@ def __call__( retry (google.api_core.retry.Retry): Designation of what errors, if any, should be retried. timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. + metadata (Sequence[Tuple[str, Union[str, bytes]]]): Key/value pairs which should be + sent along with the request as metadata. Normally, each value must be of type `str`, + but for metadata keys ending with the suffix `-bin`, the corresponding values must + be of type `bytes`. 
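Server-streaming methods such as ReadRows and SampleRowKeys hand the caller an iterable rather than a single message; with the REST transport that iterable is the rest_streaming.ResponseIterator constructed above. A brief usage sketch, with an illustrative table path and assuming default credentials:

    from google.cloud import bigtable_v2

    client = bigtable_v2.BigtableClient()
    request = bigtable_v2.ReadRowsRequest(
        table_name="projects/my-project/instances/my-instance/tables/my-table",
    )

    # Each iteration yields one ReadRowsResponse carrying a batch of cell chunks.
    for response in client.read_rows(request=request):
        for chunk in response.chunks:
            print(bytes(chunk.row_key))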
Returns: ~.bigtable.SampleRowKeysResponse: @@ -1423,6 +1818,7 @@ def __call__( http_options = ( _BaseBigtableRestTransport._BaseSampleRowKeys._get_http_options() ) + request, metadata = self._interceptor.pre_sample_row_keys(request, metadata) transcoded_request = ( _BaseBigtableRestTransport._BaseSampleRowKeys._get_transcoded_request( @@ -1437,6 +1833,33 @@ def __call__( ) ) + if CLIENT_LOGGING_SUPPORTED and _LOGGER.isEnabledFor( + logging.DEBUG + ): # pragma: NO COVER + request_url = "{host}{uri}".format( + host=self._host, uri=transcoded_request["uri"] + ) + method = transcoded_request["method"] + try: + request_payload = type(request).to_json(request) + except: + request_payload = None + http_request = { + "payload": request_payload, + "requestMethod": method, + "requestUrl": request_url, + "headers": dict(metadata), + } + _LOGGER.debug( + f"Sending request for google.bigtable_v2.BigtableClient.SampleRowKeys", + extra={ + "serviceName": "google.bigtable.v2.Bigtable", + "rpcName": "SampleRowKeys", + "httpRequest": http_request, + "metadata": http_request["headers"], + }, + ) + # Send the request response = BigtableRestTransport._SampleRowKeys._get_response( self._host, @@ -1456,6 +1879,7 @@ def __call__( resp = rest_streaming.ResponseIterator( response, bigtable.SampleRowKeysResponse ) + resp = self._interceptor.post_sample_row_keys(resp) return resp diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index 3f79e11a4..c050fb725 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -8647,6 +8647,7 @@ def test_create_instance_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_instance(request) @@ -8703,6 +8704,7 @@ def test_create_instance_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_instance(**mock_args) @@ -8835,6 +8837,7 @@ def test_get_instance_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_instance(request) @@ -8880,6 +8883,7 @@ def test_get_instance_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_instance(**mock_args) @@ -9013,6 +9017,7 @@ def test_list_instances_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instances(request) @@ -9058,6 +9063,7 @@ def test_list_instances_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instances(**mock_args) @@ -9186,6 +9192,7 @@ def 
test_update_instance_rest_required_fields(request_type=instance.Instance): response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_instance(request) @@ -9309,6 +9316,7 @@ def test_partial_update_instance_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.partial_update_instance(request) @@ -9361,6 +9369,7 @@ def test_partial_update_instance_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.partial_update_instance(**mock_args) @@ -9489,6 +9498,7 @@ def test_delete_instance_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_instance(request) @@ -9532,6 +9542,7 @@ def test_delete_instance_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_instance(**mock_args) @@ -9672,6 +9683,7 @@ def test_create_cluster_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_cluster(request) @@ -9732,6 +9744,7 @@ def test_create_cluster_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_cluster(**mock_args) @@ -9864,6 +9877,7 @@ def test_get_cluster_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_cluster(request) @@ -9909,6 +9923,7 @@ def test_get_cluster_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_cluster(**mock_args) @@ -10041,6 +10056,7 @@ def test_list_clusters_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_clusters(request) @@ -10086,6 +10102,7 @@ def test_list_clusters_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_clusters(**mock_args) @@ -10260,6 +10277,7 @@ def test_partial_update_cluster_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", 
"header-2": "value-2"} response = client.partial_update_cluster(request) @@ -10314,6 +10332,7 @@ def test_partial_update_cluster_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.partial_update_cluster(**mock_args) @@ -10443,6 +10462,7 @@ def test_delete_cluster_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_cluster(request) @@ -10486,6 +10506,7 @@ def test_delete_cluster_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_cluster(**mock_args) @@ -10635,6 +10656,7 @@ def test_create_app_profile_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_app_profile(request) @@ -10702,6 +10724,7 @@ def test_create_app_profile_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_app_profile(**mock_args) @@ -10835,6 +10858,7 @@ def test_get_app_profile_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_app_profile(request) @@ -10882,6 +10906,7 @@ def test_get_app_profile_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_app_profile(**mock_args) @@ -11024,6 +11049,7 @@ def test_list_app_profiles_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_app_profiles(request) @@ -11077,6 +11103,7 @@ def test_list_app_profiles_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_app_profiles(**mock_args) @@ -11279,6 +11306,7 @@ def test_update_app_profile_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_app_profile(request) @@ -11340,6 +11368,7 @@ def test_update_app_profile_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_app_profile(**mock_args) @@ -11482,6 +11511,7 @@ def 
test_delete_app_profile_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_app_profile(request) @@ -11541,6 +11571,7 @@ def test_delete_app_profile_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_app_profile(**mock_args) @@ -11671,6 +11702,7 @@ def test_get_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_iam_policy(request) @@ -11714,6 +11746,7 @@ def test_get_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_iam_policy(**mock_args) @@ -11844,6 +11877,7 @@ def test_set_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.set_iam_policy(request) @@ -11895,6 +11929,7 @@ def test_set_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.set_iam_policy(**mock_args) @@ -12033,6 +12068,7 @@ def test_test_iam_permissions_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.test_iam_permissions(request) @@ -12085,6 +12121,7 @@ def test_test_iam_permissions_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.test_iam_permissions(**mock_args) @@ -12230,6 +12267,7 @@ def test_list_hot_tablets_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_hot_tablets(request) @@ -12287,6 +12325,7 @@ def test_list_hot_tablets_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_hot_tablets(**mock_args) @@ -13563,6 +13602,7 @@ def test_create_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_instance(request) @@ -13593,6 +13633,7 @@ def test_create_instance_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = 
response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_instance(request) # Establish that the response is the type that we expect. @@ -13634,6 +13675,7 @@ def test_create_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -13678,6 +13720,7 @@ def test_get_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_instance(request) @@ -13717,6 +13760,7 @@ def test_get_instance_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_instance(request) # Establish that the response is the type that we expect. @@ -13761,6 +13805,7 @@ def test_get_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = instance.Instance.to_json(instance.Instance()) req.return_value.content = return_value @@ -13805,6 +13850,7 @@ def test_list_instances_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_instances(request) @@ -13841,6 +13887,7 @@ def test_list_instances_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_instances(request) assert response.raw_page is response @@ -13884,6 +13931,7 @@ def test_list_instances_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_instance_admin.ListInstancesResponse.to_json( bigtable_instance_admin.ListInstancesResponse() ) @@ -13928,6 +13976,7 @@ def test_update_instance_rest_bad_request(request_type=instance.Instance): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_instance(request) @@ -13967,6 +14016,7 @@ def test_update_instance_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_instance(request) # Establish that the response is the type that we expect. 
@@ -14009,6 +14059,7 @@ def test_update_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = instance.Instance.to_json(instance.Instance()) req.return_value.content = return_value @@ -14053,6 +14104,7 @@ def test_partial_update_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.partial_update_instance(request) @@ -14161,6 +14213,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.partial_update_instance(request) # Establish that the response is the type that we expect. @@ -14202,6 +14255,7 @@ def test_partial_update_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -14246,6 +14300,7 @@ def test_delete_instance_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_instance(request) @@ -14276,6 +14331,7 @@ def test_delete_instance_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_instance(request) # Establish that the response is the type that we expect. @@ -14312,6 +14368,7 @@ def test_delete_instance_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_instance_admin.DeleteInstanceRequest() metadata = [ @@ -14352,6 +14409,7 @@ def test_create_cluster_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_cluster(request) @@ -14470,6 +14528,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_cluster(request) # Establish that the response is the type that we expect. 
@@ -14511,6 +14570,7 @@ def test_create_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -14555,6 +14615,7 @@ def test_get_cluster_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_cluster(request) @@ -14595,6 +14656,7 @@ def test_get_cluster_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_cluster(request) # Establish that the response is the type that we expect. @@ -14643,6 +14705,7 @@ def test_get_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = instance.Cluster.to_json(instance.Cluster()) req.return_value.content = return_value @@ -14687,6 +14750,7 @@ def test_list_clusters_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_clusters(request) @@ -14723,6 +14787,7 @@ def test_list_clusters_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_clusters(request) assert response.raw_page is response @@ -14766,6 +14831,7 @@ def test_list_clusters_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_instance_admin.ListClustersResponse.to_json( bigtable_instance_admin.ListClustersResponse() ) @@ -14810,6 +14876,7 @@ def test_update_cluster_rest_bad_request(request_type=instance.Cluster): response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_cluster(request) @@ -14840,6 +14907,7 @@ def test_update_cluster_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_cluster(request) # Establish that the response is the type that we expect. 
@@ -14879,6 +14947,7 @@ def test_update_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -14925,6 +14994,7 @@ def test_partial_update_cluster_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.partial_update_cluster(request) @@ -15047,6 +15117,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.partial_update_cluster(request) # Establish that the response is the type that we expect. @@ -15088,6 +15159,7 @@ def test_partial_update_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -15132,6 +15204,7 @@ def test_delete_cluster_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_cluster(request) @@ -15162,6 +15235,7 @@ def test_delete_cluster_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_cluster(request) # Establish that the response is the type that we expect. @@ -15198,6 +15272,7 @@ def test_delete_cluster_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_instance_admin.DeleteClusterRequest() metadata = [ @@ -15238,6 +15313,7 @@ def test_create_app_profile_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_app_profile(request) @@ -15361,6 +15437,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_app_profile(request) # Establish that the response is the type that we expect. 
@@ -15403,6 +15480,7 @@ def test_create_app_profile_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = instance.AppProfile.to_json(instance.AppProfile()) req.return_value.content = return_value @@ -15447,6 +15525,7 @@ def test_get_app_profile_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_app_profile(request) @@ -15485,6 +15564,7 @@ def test_get_app_profile_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_app_profile(request) # Establish that the response is the type that we expect. @@ -15527,6 +15607,7 @@ def test_get_app_profile_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = instance.AppProfile.to_json(instance.AppProfile()) req.return_value.content = return_value @@ -15571,6 +15652,7 @@ def test_list_app_profiles_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_app_profiles(request) @@ -15607,6 +15689,7 @@ def test_list_app_profiles_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_app_profiles(request) # Establish that the response is the type that we expect. @@ -15648,6 +15731,7 @@ def test_list_app_profiles_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_instance_admin.ListAppProfilesResponse.to_json( bigtable_instance_admin.ListAppProfilesResponse() ) @@ -15698,6 +15782,7 @@ def test_update_app_profile_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_app_profile(request) @@ -15817,6 +15902,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_app_profile(request) # Establish that the response is the type that we expect. 
@@ -15858,6 +15944,7 @@ def test_update_app_profile_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -15902,6 +15989,7 @@ def test_delete_app_profile_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_app_profile(request) @@ -15932,6 +16020,7 @@ def test_delete_app_profile_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_app_profile(request) # Establish that the response is the type that we expect. @@ -15968,6 +16057,7 @@ def test_delete_app_profile_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_instance_admin.DeleteAppProfileRequest() metadata = [ @@ -16008,6 +16098,7 @@ def test_get_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_iam_policy(request) @@ -16041,6 +16132,7 @@ def test_get_iam_policy_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_iam_policy(request) # Establish that the response is the type that we expect. @@ -16080,6 +16172,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value @@ -16124,6 +16217,7 @@ def test_set_iam_policy_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.set_iam_policy(request) @@ -16157,6 +16251,7 @@ def test_set_iam_policy_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.set_iam_policy(request) # Establish that the response is the type that we expect. 
@@ -16196,6 +16291,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(policy_pb2.Policy()) req.return_value.content = return_value @@ -16240,6 +16336,7 @@ def test_test_iam_permissions_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.test_iam_permissions(request) @@ -16272,6 +16369,7 @@ def test_test_iam_permissions_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.test_iam_permissions(request) # Establish that the response is the type that we expect. @@ -16310,6 +16408,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson( iam_policy_pb2.TestIamPermissionsResponse() ) @@ -16356,6 +16455,7 @@ def test_list_hot_tablets_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_hot_tablets(request) @@ -16391,6 +16491,7 @@ def test_list_hot_tablets_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_hot_tablets(request) # Establish that the response is the type that we expect. 
@@ -16431,6 +16532,7 @@ def test_list_hot_tablets_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_instance_admin.ListHotTabletsResponse.to_json( bigtable_instance_admin.ListHotTabletsResponse() ) diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index 53788921f..3766bc72b 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -12093,6 +12093,7 @@ def test_create_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_table(request) @@ -12149,6 +12150,7 @@ def test_create_table_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_table(**mock_args) @@ -12296,6 +12298,7 @@ def test_create_table_from_snapshot_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_table_from_snapshot(request) @@ -12350,6 +12353,7 @@ def test_create_table_from_snapshot_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_table_from_snapshot(**mock_args) @@ -12491,6 +12495,7 @@ def test_list_tables_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_tables(request) @@ -12545,6 +12550,7 @@ def test_list_tables_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_tables(**mock_args) @@ -12740,6 +12746,7 @@ def test_get_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_table(request) @@ -12785,6 +12792,7 @@ def test_get_table_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_table(**mock_args) @@ -12914,6 +12922,7 @@ def test_update_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_table(request) @@ -12968,6 +12977,7 @@ def test_update_table_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content 
= json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_table(**mock_args) @@ -13097,6 +13107,7 @@ def test_delete_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_table(request) @@ -13140,6 +13151,7 @@ def test_delete_table_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_table(**mock_args) @@ -13272,6 +13284,7 @@ def test_undelete_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.undelete_table(request) @@ -13315,6 +13328,7 @@ def test_undelete_table_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.undelete_table(**mock_args) @@ -13462,6 +13476,7 @@ def test_create_authorized_view_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_authorized_view(request) @@ -13522,6 +13537,7 @@ def test_create_authorized_view_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_authorized_view(**mock_args) @@ -13670,6 +13686,7 @@ def test_list_authorized_views_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_authorized_views(request) @@ -13724,6 +13741,7 @@ def test_list_authorized_views_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_authorized_views(**mock_args) @@ -13925,6 +13943,7 @@ def test_get_authorized_view_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_authorized_view(request) @@ -13972,6 +13991,7 @@ def test_get_authorized_view_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_authorized_view(**mock_args) @@ -14112,6 +14132,7 @@ def test_update_authorized_view_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = 
client.update_authorized_view(request) @@ -14168,6 +14189,7 @@ def test_update_authorized_view_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_authorized_view(**mock_args) @@ -14304,6 +14326,7 @@ def test_delete_authorized_view_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_authorized_view(request) @@ -14349,6 +14372,7 @@ def test_delete_authorized_view_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_authorized_view(**mock_args) @@ -14486,6 +14510,7 @@ def test_modify_column_families_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.modify_column_families(request) @@ -14544,6 +14569,7 @@ def test_modify_column_families_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.modify_column_families(**mock_args) @@ -14678,6 +14704,7 @@ def test_drop_row_range_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.drop_row_range(request) @@ -14805,6 +14832,7 @@ def test_generate_consistency_token_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.generate_consistency_token(request) @@ -14852,6 +14880,7 @@ def test_generate_consistency_token_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.generate_consistency_token(**mock_args) @@ -14992,6 +15021,7 @@ def test_check_consistency_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.check_consistency(request) @@ -15046,6 +15076,7 @@ def test_check_consistency_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.check_consistency(**mock_args) @@ -15188,6 +15219,7 @@ def test_snapshot_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.snapshot_table(request) @@ -15243,6 +15275,7 @@ def test_snapshot_table_rest_flattened(): 
json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.snapshot_table(**mock_args) @@ -15377,6 +15410,7 @@ def test_get_snapshot_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_snapshot(request) @@ -15424,6 +15458,7 @@ def test_get_snapshot_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_snapshot(**mock_args) @@ -15562,6 +15597,7 @@ def test_list_snapshots_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_snapshots(request) @@ -15617,6 +15653,7 @@ def test_list_snapshots_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_snapshots(**mock_args) @@ -15810,6 +15847,7 @@ def test_delete_snapshot_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_snapshot(request) @@ -15855,6 +15893,7 @@ def test_delete_snapshot_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_snapshot(**mock_args) @@ -15997,6 +16036,7 @@ def test_create_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_backup(request) @@ -16059,6 +16099,7 @@ def test_create_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_backup(**mock_args) @@ -16192,6 +16233,7 @@ def test_get_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_backup(request) @@ -16239,6 +16281,7 @@ def test_get_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_backup(**mock_args) @@ -16368,6 +16411,7 @@ def test_update_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_backup(request) @@ 
-16426,6 +16470,7 @@ def test_update_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_backup(**mock_args) @@ -16555,6 +16600,7 @@ def test_delete_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_backup(request) @@ -16600,6 +16646,7 @@ def test_delete_backup_rest_flattened(): json_return_value = "" response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_backup(**mock_args) @@ -16740,6 +16787,7 @@ def test_list_backups_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_backups(request) @@ -16797,6 +16845,7 @@ def test_list_backups_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_backups(**mock_args) @@ -16999,6 +17048,7 @@ def test_restore_table_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.restore_table(request) @@ -17136,6 +17186,7 @@ def test_copy_backup_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.copy_backup(request) @@ -17194,6 +17245,7 @@ def test_copy_backup_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.copy_backup(**mock_args) @@ -17327,6 +17379,7 @@ def test_get_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_iam_policy(request) @@ -17372,6 +17425,7 @@ def test_get_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_iam_policy(**mock_args) @@ -17502,6 +17556,7 @@ def test_set_iam_policy_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.set_iam_policy(request) @@ -17555,6 +17610,7 @@ def test_set_iam_policy_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", 
"header-2": "value-2"} client.set_iam_policy(**mock_args) @@ -17693,6 +17749,7 @@ def test_test_iam_permissions_rest_required_fields( response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.test_iam_permissions(request) @@ -17747,6 +17804,7 @@ def test_test_iam_permissions_rest_flattened(): json_return_value = json_format.MessageToJson(return_value) response_value._content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.test_iam_permissions(**mock_args) @@ -19390,6 +19448,7 @@ def test_create_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_table(request) @@ -19427,6 +19486,7 @@ def test_create_table_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_table(request) # Establish that the response is the type that we expect. @@ -19469,6 +19529,7 @@ def test_create_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = gba_table.Table.to_json(gba_table.Table()) req.return_value.content = return_value @@ -19513,6 +19574,7 @@ def test_create_table_from_snapshot_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_table_from_snapshot(request) @@ -19543,6 +19605,7 @@ def test_create_table_from_snapshot_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_table_from_snapshot(request) # Establish that the response is the type that we expect. @@ -19584,6 +19647,7 @@ def test_create_table_from_snapshot_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -19628,6 +19692,7 @@ def test_list_tables_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_tables(request) @@ -19663,6 +19728,7 @@ def test_list_tables_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_tables(request) # Establish that the response is the type that we expect. 
@@ -19703,6 +19769,7 @@ def test_list_tables_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.ListTablesResponse.to_json( bigtable_table_admin.ListTablesResponse() ) @@ -19747,6 +19814,7 @@ def test_get_table_rest_bad_request(request_type=bigtable_table_admin.GetTableRe response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_table(request) @@ -19784,6 +19852,7 @@ def test_get_table_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_table(request) # Establish that the response is the type that we expect. @@ -19826,6 +19895,7 @@ def test_get_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table.Table.to_json(table.Table()) req.return_value.content = return_value @@ -19872,6 +19942,7 @@ def test_update_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_table(request) @@ -19990,6 +20061,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_table(request) # Establish that the response is the type that we expect. @@ -20031,6 +20103,7 @@ def test_update_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -20075,6 +20148,7 @@ def test_delete_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_table(request) @@ -20105,6 +20179,7 @@ def test_delete_table_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_table(request) # Establish that the response is the type that we expect. 
@@ -20141,6 +20216,7 @@ def test_delete_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_table_admin.DeleteTableRequest() metadata = [ @@ -20181,6 +20257,7 @@ def test_undelete_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.undelete_table(request) @@ -20211,6 +20288,7 @@ def test_undelete_table_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.undelete_table(request) # Establish that the response is the type that we expect. @@ -20252,6 +20330,7 @@ def test_undelete_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -20296,6 +20375,7 @@ def test_create_authorized_view_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.create_authorized_view(request) @@ -20404,6 +20484,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.create_authorized_view(request) # Establish that the response is the type that we expect. @@ -20445,6 +20526,7 @@ def test_create_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -20489,6 +20571,7 @@ def test_list_authorized_views_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.list_authorized_views(request) @@ -20524,6 +20607,7 @@ def test_list_authorized_views_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.list_authorized_views(request) # Establish that the response is the type that we expect. 
@@ -20564,6 +20648,7 @@ def test_list_authorized_views_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.ListAuthorizedViewsResponse.to_json( bigtable_table_admin.ListAuthorizedViewsResponse() ) @@ -20612,6 +20697,7 @@ def test_get_authorized_view_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_authorized_view(request) @@ -20651,6 +20737,7 @@ def test_get_authorized_view_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_authorized_view(request) # Establish that the response is the type that we expect. @@ -20693,6 +20780,7 @@ def test_get_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table.AuthorizedView.to_json(table.AuthorizedView()) req.return_value.content = return_value @@ -20741,6 +20829,7 @@ def test_update_authorized_view_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.update_authorized_view(request) @@ -20853,6 +20942,7 @@ def get_message_fields(field): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.update_authorized_view(request) # Establish that the response is the type that we expect. @@ -20894,6 +20984,7 @@ def test_update_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -20940,6 +21031,7 @@ def test_delete_authorized_view_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.delete_authorized_view(request) @@ -20972,6 +21064,7 @@ def test_delete_authorized_view_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.delete_authorized_view(request) # Establish that the response is the type that we expect. 
@@ -21008,6 +21101,7 @@ def test_delete_authorized_view_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_table_admin.DeleteAuthorizedViewRequest() metadata = [ @@ -21048,6 +21142,7 @@ def test_modify_column_families_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.modify_column_families(request) @@ -21085,6 +21180,7 @@ def test_modify_column_families_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.modify_column_families(request) # Establish that the response is the type that we expect. @@ -21127,6 +21223,7 @@ def test_modify_column_families_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = table.Table.to_json(table.Table()) req.return_value.content = return_value @@ -21171,6 +21268,7 @@ def test_drop_row_range_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.drop_row_range(request) @@ -21201,6 +21299,7 @@ def test_drop_row_range_rest_call_success(request_type): json_return_value = "" response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.drop_row_range(request) # Establish that the response is the type that we expect. @@ -21237,6 +21336,7 @@ def test_drop_row_range_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} request = bigtable_table_admin.DropRowRangeRequest() metadata = [ @@ -21277,6 +21377,7 @@ def test_generate_consistency_token_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.generate_consistency_token(request) @@ -21314,6 +21415,7 @@ def test_generate_consistency_token_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.generate_consistency_token(request) # Establish that the response is the type that we expect. 
@@ -21354,6 +21456,7 @@ def test_generate_consistency_token_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json( bigtable_table_admin.GenerateConsistencyTokenResponse() ) @@ -21400,6 +21503,7 @@ def test_check_consistency_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.check_consistency(request) @@ -21435,6 +21539,7 @@ def test_check_consistency_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.check_consistency(request) # Establish that the response is the type that we expect. @@ -21475,6 +21580,7 @@ def test_check_consistency_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = bigtable_table_admin.CheckConsistencyResponse.to_json( bigtable_table_admin.CheckConsistencyResponse() ) @@ -21521,6 +21627,7 @@ def test_snapshot_table_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.snapshot_table(request) @@ -21551,6 +21658,7 @@ def test_snapshot_table_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.snapshot_table(request) # Establish that the response is the type that we expect. @@ -21592,6 +21700,7 @@ def test_snapshot_table_rest_interceptors(null_interceptor): req.return_value = mock.Mock() req.return_value.status_code = 200 + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} return_value = json_format.MessageToJson(operations_pb2.Operation()) req.return_value.content = return_value @@ -21638,6 +21747,7 @@ def test_get_snapshot_rest_bad_request( response_value.status_code = 400 response_value.request = mock.Mock() req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} client.get_snapshot(request) @@ -21678,6 +21788,7 @@ def test_get_snapshot_rest_call_success(request_type): json_return_value = json_format.MessageToJson(return_value) response_value.content = json_return_value.encode("UTF-8") req.return_value = response_value + req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"} response = client.get_snapshot(request) # Establish that the response is the type that we expect. 
@@ -21721,6 +21832,7 @@ def test_get_snapshot_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = table.Snapshot.to_json(table.Snapshot())
         req.return_value.content = return_value
 
@@ -21765,6 +21877,7 @@ def test_list_snapshots_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.list_snapshots(request)
 
 
@@ -21800,6 +21913,7 @@ def test_list_snapshots_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.list_snapshots(request)
 
     # Establish that the response is the type that we expect.
@@ -21840,6 +21954,7 @@ def test_list_snapshots_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable_table_admin.ListSnapshotsResponse.to_json(
             bigtable_table_admin.ListSnapshotsResponse()
         )
@@ -21888,6 +22003,7 @@ def test_delete_snapshot_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.delete_snapshot(request)
 
 
@@ -21920,6 +22036,7 @@ def test_delete_snapshot_rest_call_success(request_type):
         json_return_value = ""
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.delete_snapshot(request)
 
     # Establish that the response is the type that we expect.
@@ -21956,6 +22073,7 @@ def test_delete_snapshot_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         request = bigtable_table_admin.DeleteSnapshotRequest()
         metadata = [
@@ -21996,6 +22114,7 @@ def test_create_backup_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.create_backup(request)
 
 
@@ -22119,6 +22238,7 @@ def get_message_fields(field):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.create_backup(request)
 
     # Establish that the response is the type that we expect.
@@ -22160,6 +22280,7 @@ def test_create_backup_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = json_format.MessageToJson(operations_pb2.Operation())
         req.return_value.content = return_value
 
@@ -22206,6 +22327,7 @@ def test_get_backup_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.get_backup(request)
 
 
@@ -22248,6 +22370,7 @@ def test_get_backup_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.get_backup(request)
 
     # Establish that the response is the type that we expect.
@@ -22293,6 +22416,7 @@ def test_get_backup_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = table.Backup.to_json(table.Backup())
         req.return_value.content = return_value
 
@@ -22341,6 +22465,7 @@ def test_update_backup_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.update_backup(request)
 
 
@@ -22478,6 +22603,7 @@ def get_message_fields(field):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.update_backup(request)
 
     # Establish that the response is the type that we expect.
@@ -22523,6 +22649,7 @@ def test_update_backup_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = table.Backup.to_json(table.Backup())
         req.return_value.content = return_value
 
@@ -22569,6 +22696,7 @@ def test_delete_backup_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.delete_backup(request)
 
 
@@ -22601,6 +22729,7 @@ def test_delete_backup_rest_call_success(request_type):
         json_return_value = ""
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.delete_backup(request)
 
     # Establish that the response is the type that we expect.
@@ -22637,6 +22766,7 @@ def test_delete_backup_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         request = bigtable_table_admin.DeleteBackupRequest()
         metadata = [
@@ -22677,6 +22807,7 @@ def test_list_backups_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.list_backups(request)
 
 
@@ -22712,6 +22843,7 @@ def test_list_backups_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.list_backups(request)
 
     # Establish that the response is the type that we expect.
@@ -22752,6 +22884,7 @@ def test_list_backups_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable_table_admin.ListBackupsResponse.to_json(
             bigtable_table_admin.ListBackupsResponse()
         )
@@ -22798,6 +22931,7 @@ def test_restore_table_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.restore_table(request)
 
 
@@ -22828,6 +22962,7 @@ def test_restore_table_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.restore_table(request)
 
     # Establish that the response is the type that we expect.
@@ -22869,6 +23004,7 @@ def test_restore_table_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = json_format.MessageToJson(operations_pb2.Operation())
         req.return_value.content = return_value
 
@@ -22913,6 +23049,7 @@ def test_copy_backup_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.copy_backup(request)
 
 
@@ -22943,6 +23080,7 @@ def test_copy_backup_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.copy_backup(request)
 
     # Establish that the response is the type that we expect.
@@ -22984,6 +23122,7 @@ def test_copy_backup_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = json_format.MessageToJson(operations_pb2.Operation())
         req.return_value.content = return_value
 
@@ -23028,6 +23167,7 @@ def test_get_iam_policy_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.get_iam_policy(request)
 
 
@@ -23061,6 +23201,7 @@ def test_get_iam_policy_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.get_iam_policy(request)
 
     # Establish that the response is the type that we expect.
@@ -23100,6 +23241,7 @@ def test_get_iam_policy_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = json_format.MessageToJson(policy_pb2.Policy())
         req.return_value.content = return_value
 
@@ -23144,6 +23286,7 @@ def test_set_iam_policy_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.set_iam_policy(request)
 
 
@@ -23177,6 +23320,7 @@ def test_set_iam_policy_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.set_iam_policy(request)
 
     # Establish that the response is the type that we expect.
@@ -23216,6 +23360,7 @@ def test_set_iam_policy_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = json_format.MessageToJson(policy_pb2.Policy())
         req.return_value.content = return_value
 
@@ -23260,6 +23405,7 @@ def test_test_iam_permissions_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.test_iam_permissions(request)
 
 
@@ -23292,6 +23438,7 @@ def test_test_iam_permissions_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.test_iam_permissions(request)
 
     # Establish that the response is the type that we expect.
@@ -23330,6 +23477,7 @@ def test_test_iam_permissions_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = json_format.MessageToJson(
             iam_policy_pb2.TestIamPermissionsResponse()
         )
diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py
index 37b4bbfca..a5bff8aea 100644
--- a/tests/unit/gapic/bigtable_v2/test_bigtable.py
+++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py
@@ -4218,6 +4218,7 @@ def test_read_rows_rest_flattened():
         json_return_value = "[{}]".format(json_return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         with mock.patch.object(response_value, "iter_content") as iter_content:
             iter_content.return_value = iter(json_return_value)
@@ -4318,6 +4319,7 @@ def test_sample_row_keys_rest_flattened():
         json_return_value = "[{}]".format(json_return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         with mock.patch.object(response_value, "iter_content") as iter_content:
             iter_content.return_value = iter(json_return_value)
@@ -4451,6 +4453,7 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest)
 
             response_value._content = json_return_value.encode("UTF-8")
             req.return_value = response_value
+            req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
             response = client.mutate_row(request)
 
@@ -4513,6 +4516,7 @@ def test_mutate_row_rest_flattened():
         json_return_value = json_format.MessageToJson(return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         client.mutate_row(**mock_args)
 
@@ -4646,6 +4650,7 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques
 
             response_value._content = json_return_value.encode("UTF-8")
             req.return_value = response_value
+            req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
             with mock.patch.object(response_value, "iter_content") as iter_content:
                 iter_content.return_value = iter(json_return_value)
@@ -4698,6 +4703,7 @@ def test_mutate_rows_rest_flattened():
         json_return_value = "[{}]".format(json_return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         with mock.patch.object(response_value, "iter_content") as iter_content:
             iter_content.return_value = iter(json_return_value)
@@ -4838,6 +4844,7 @@ def test_check_and_mutate_row_rest_required_fields(
 
             response_value._content = json_return_value.encode("UTF-8")
             req.return_value = response_value
+            req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
             response = client.check_and_mutate_row(request)
 
@@ -4908,6 +4915,7 @@ def test_check_and_mutate_row_rest_flattened():
         json_return_value = json_format.MessageToJson(return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         client.check_and_mutate_row(**mock_args)
 
@@ -5061,6 +5069,7 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq
 
             response_value._content = json_return_value.encode("UTF-8")
             req.return_value = response_value
+            req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
             response = client.ping_and_warm(request)
 
@@ -5107,6 +5116,7 @@ def test_ping_and_warm_rest_flattened():
         json_return_value = json_format.MessageToJson(return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         client.ping_and_warm(**mock_args)
 
@@ -5243,6 +5253,7 @@ def test_read_modify_write_row_rest_required_fields(
 
             response_value._content = json_return_value.encode("UTF-8")
             req.return_value = response_value
+            req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
             response = client.read_modify_write_row(request)
 
@@ -5301,6 +5312,7 @@ def test_read_modify_write_row_rest_flattened():
         json_return_value = json_format.MessageToJson(return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         client.read_modify_write_row(**mock_args)
 
@@ -5448,6 +5460,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields(
 
             response_value._content = json_return_value.encode("UTF-8")
             req.return_value = response_value
+            req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
             with mock.patch.object(response_value, "iter_content") as iter_content:
                 iter_content.return_value = iter(json_return_value)
@@ -5505,6 +5518,7 @@ def test_generate_initial_change_stream_partitions_rest_flattened():
         json_return_value = "[{}]".format(json_return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         with mock.patch.object(response_value, "iter_content") as iter_content:
             iter_content.return_value = iter(json_return_value)
@@ -5647,6 +5661,7 @@ def test_read_change_stream_rest_required_fields(
 
             response_value._content = json_return_value.encode("UTF-8")
             req.return_value = response_value
+            req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
             with mock.patch.object(response_value, "iter_content") as iter_content:
                 iter_content.return_value = iter(json_return_value)
@@ -5698,6 +5713,7 @@ def test_read_change_stream_rest_flattened():
         json_return_value = "[{}]".format(json_return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         with mock.patch.object(response_value, "iter_content") as iter_content:
             iter_content.return_value = iter(json_return_value)
@@ -5836,6 +5852,7 @@ def test_execute_query_rest_required_fields(request_type=bigtable.ExecuteQueryRe
 
             response_value._content = json_return_value.encode("UTF-8")
             req.return_value = response_value
+            req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
             with mock.patch.object(response_value, "iter_content") as iter_content:
                 iter_content.return_value = iter(json_return_value)
@@ -5895,6 +5912,7 @@ def test_execute_query_rest_flattened():
         json_return_value = "[{}]".format(json_return_value)
         response_value._content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
 
         with mock.patch.object(response_value, "iter_content") as iter_content:
             iter_content.return_value = iter(json_return_value)
@@ -7955,6 +7973,7 @@ def test_read_rows_rest_bad_request(request_type=bigtable.ReadRowsRequest):
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.read_rows(request)
 
 
@@ -7991,6 +8010,7 @@ def test_read_rows_rest_call_success(request_type):
         json_return_value = "[{}]".format(json_return_value)
         response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.read_rows(request)
 
     assert isinstance(response, Iterable)
@@ -8030,6 +8050,7 @@ def test_read_rows_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.ReadRowsResponse.to_json(bigtable.ReadRowsResponse())
         req.return_value.iter_content = mock.Mock(return_value=iter(return_value))
 
@@ -8072,6 +8093,7 @@ def test_sample_row_keys_rest_bad_request(request_type=bigtable.SampleRowKeysReq
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.sample_row_keys(request)
 
 
@@ -8109,6 +8131,7 @@ def test_sample_row_keys_rest_call_success(request_type):
         json_return_value = "[{}]".format(json_return_value)
         response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.sample_row_keys(request)
 
     assert isinstance(response, Iterable)
@@ -8149,6 +8172,7 @@ def test_sample_row_keys_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.SampleRowKeysResponse.to_json(
             bigtable.SampleRowKeysResponse()
         )
@@ -8193,6 +8217,7 @@ def test_mutate_row_rest_bad_request(request_type=bigtable.MutateRowRequest):
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.mutate_row(request)
 
 
@@ -8226,6 +8251,7 @@ def test_mutate_row_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.mutate_row(request)
 
     # Establish that the response is the type that we expect.
@@ -8261,6 +8287,7 @@ def test_mutate_row_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.MutateRowResponse.to_json(bigtable.MutateRowResponse())
         req.return_value.content = return_value
 
@@ -8303,6 +8330,7 @@ def test_mutate_rows_rest_bad_request(request_type=bigtable.MutateRowsRequest):
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.mutate_rows(request)
 
 
@@ -8337,6 +8365,7 @@ def test_mutate_rows_rest_call_success(request_type):
         json_return_value = "[{}]".format(json_return_value)
         response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.mutate_rows(request)
 
     assert isinstance(response, Iterable)
@@ -8375,6 +8404,7 @@ def test_mutate_rows_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.MutateRowsResponse.to_json(
             bigtable.MutateRowsResponse()
         )
@@ -8421,6 +8451,7 @@ def test_check_and_mutate_row_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.check_and_mutate_row(request)
 
 
@@ -8456,6 +8487,7 @@ def test_check_and_mutate_row_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.check_and_mutate_row(request)
 
     # Establish that the response is the type that we expect.
@@ -8494,6 +8526,7 @@ def test_check_and_mutate_row_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.CheckAndMutateRowResponse.to_json(
             bigtable.CheckAndMutateRowResponse()
         )
@@ -8538,6 +8571,7 @@ def test_ping_and_warm_rest_bad_request(request_type=bigtable.PingAndWarmRequest
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.ping_and_warm(request)
 
 
@@ -8571,6 +8605,7 @@ def test_ping_and_warm_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.ping_and_warm(request)
 
     # Establish that the response is the type that we expect.
@@ -8606,6 +8641,7 @@ def test_ping_and_warm_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.PingAndWarmResponse.to_json(
             bigtable.PingAndWarmResponse()
         )
@@ -8652,6 +8688,7 @@ def test_read_modify_write_row_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.read_modify_write_row(request)
 
 
@@ -8685,6 +8722,7 @@ def test_read_modify_write_row_rest_call_success(request_type):
         json_return_value = json_format.MessageToJson(return_value)
         response_value.content = json_return_value.encode("UTF-8")
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.read_modify_write_row(request)
 
     # Establish that the response is the type that we expect.
@@ -8722,6 +8760,7 @@ def test_read_modify_write_row_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.ReadModifyWriteRowResponse.to_json(
             bigtable.ReadModifyWriteRowResponse()
         )
@@ -8768,6 +8807,7 @@ def test_generate_initial_change_stream_partitions_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.generate_initial_change_stream_partitions(request)
 
 
@@ -8804,6 +8844,7 @@ def test_generate_initial_change_stream_partitions_rest_call_success(request_typ
         json_return_value = "[{}]".format(json_return_value)
         response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.generate_initial_change_stream_partitions(request)
 
     assert isinstance(response, Iterable)
@@ -8846,6 +8887,7 @@ def test_generate_initial_change_stream_partitions_rest_interceptors(null_interc
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json(
             bigtable.GenerateInitialChangeStreamPartitionsResponse()
         )
@@ -8892,6 +8934,7 @@ def test_read_change_stream_rest_bad_request(
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.read_change_stream(request)
 
 
@@ -8926,6 +8969,7 @@ def test_read_change_stream_rest_call_success(request_type):
         json_return_value = "[{}]".format(json_return_value)
         response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.read_change_stream(request)
 
     assert isinstance(response, Iterable)
@@ -8966,6 +9010,7 @@ def test_read_change_stream_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
        req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.ReadChangeStreamResponse.to_json(
             bigtable.ReadChangeStreamResponse()
         )
@@ -9010,6 +9055,7 @@ def test_execute_query_rest_bad_request(request_type=bigtable.ExecuteQueryReques
         response_value.status_code = 400
         response_value.request = mock.Mock()
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         client.execute_query(request)
 
 
@@ -9044,6 +9090,7 @@ def test_execute_query_rest_call_success(request_type):
         json_return_value = "[{}]".format(json_return_value)
         response_value.iter_content = mock.Mock(return_value=iter(json_return_value))
         req.return_value = response_value
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         response = client.execute_query(request)
 
     assert isinstance(response, Iterable)
@@ -9082,6 +9129,7 @@ def test_execute_query_rest_interceptors(null_interceptor):
 
         req.return_value = mock.Mock()
         req.return_value.status_code = 200
+        req.return_value.headers = {"header-1": "value-1", "header-2": "value-2"}
         return_value = bigtable.ExecuteQueryResponse.to_json(
             bigtable.ExecuteQueryResponse()
        )