diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py index e4c4639af..ab14ddaed 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py @@ -38,9 +38,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -67,8 +67,12 @@ class BigtableInstanceAdminAsyncClient: _client: BigtableInstanceAdminClient + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BigtableInstanceAdminClient._DEFAULT_UNIVERSE app_profile_path = staticmethod(BigtableInstanceAdminClient.app_profile_path) parse_app_profile_path = staticmethod( @@ -193,6 +197,25 @@ def transport(self) -> BigtableInstanceAdminTransport: """ return self._client.transport + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. 
+ + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + get_transport_class = functools.partial( type(BigtableInstanceAdminClient).get_transport_class, type(BigtableInstanceAdminClient), @@ -206,7 +229,7 @@ def __init__( client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable instance admin client. + """Instantiates the bigtable instance admin async client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -217,23 +240,38 @@ def __init__( transport (Union[str, ~.BigtableInstanceAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. 
- (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. @@ -360,6 +398,9 @@ async def create_instance( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -460,6 +501,9 @@ async def get_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -549,6 +593,9 @@ async def list_instances( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, @@ -623,6 +670,9 @@ async def update_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -731,6 +781,9 @@ async def partial_update_instance( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -812,6 +865,9 @@ async def delete_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -920,6 +976,9 @@ async def create_cluster( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1019,6 +1078,9 @@ async def get_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1110,6 +1172,9 @@ async def list_clusters( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1184,6 +1249,9 @@ async def update_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1299,6 +1367,9 @@ async def partial_update_cluster( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, @@ -1380,6 +1451,9 @@ async def delete_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1479,6 +1553,9 @@ async def create_app_profile( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1569,6 +1646,9 @@ async def get_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1664,6 +1744,9 @@ async def list_app_profiles( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1776,6 +1859,9 @@ async def update_app_profile( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1857,6 +1943,9 @@ async def delete_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1973,6 +2062,9 @@ async def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2081,6 +2173,9 @@ async def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, @@ -2180,6 +2275,9 @@ async def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2273,6 +2371,9 @@ async def list_hot_tablets( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py index 52c61ea4f..5f97b7511 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py @@ -28,6 +28,7 @@ Union, cast, ) +import warnings from google.cloud.bigtable_admin_v2 import gapic_version as package_version @@ -42,9 +43,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -137,11 +138,15 @@ def _get_default_mtls_endpoint(api_endpoint): return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) + _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -403,7 +408,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): - """Return the API endpoint and client cert source for mutual TLS. + """Deprecated. Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the @@ -433,6 +438,11 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") @@ -466,6 +476,177 @@ def get_mtls_endpoint_and_cert_source( return api_endpoint, client_cert_source + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. 
+ """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = ( + BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. 
+ + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + if credentials: + credentials_universe = credentials.universe_domain + if client_universe != credentials_universe: + default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or BigtableInstanceAdminClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + def __init__( self, *, @@ -485,22 +666,32 @@ def __init__( transport (Union[str, BigtableInstanceAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. 
- (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. 
If ``None``, then default info will be used. @@ -511,17 +702,34 @@ def __init__( google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( - client_options + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = BigtableInstanceAdminClient._read_environment_variables() + self._client_cert_source = BigtableInstanceAdminClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = BigtableInstanceAdminClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env ) + self._api_endpoint = None # updated below, depending on `transport` - api_key_value = getattr(client_options, "api_key", None) + # Initialize the universe domain validation. + self._is_universe_domain_valid = False + + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" @@ -530,20 +738,33 @@ def __init__( # Save or instantiate the transport. 
# Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. - if isinstance(transport, BigtableInstanceAdminTransport): + transport_provided = isinstance(transport, BigtableInstanceAdminTransport) + if transport_provided: # transport is a BigtableInstanceAdminTransport instance. - if credentials or client_options.credentials_file or api_key_value: + if credentials or self._client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) - if client_options.scopes: + if self._client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) - self._transport = transport - else: + self._transport = cast(BigtableInstanceAdminTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or BigtableInstanceAdminClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: import google.auth._default # type: ignore if api_key_value and hasattr( @@ -553,17 +774,17 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(transport) + Transport = type(self).get_transport_class(cast(str, transport)) self._transport = Transport( credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, - 
api_audience=client_options.api_audience, + api_audience=self._client_options.api_audience, ) def create_instance( @@ -680,6 +901,9 @@ def create_instance( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -770,6 +994,9 @@ def get_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -849,6 +1076,9 @@ def list_instances( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -914,6 +1144,9 @@ def update_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1014,6 +1247,9 @@ def partial_update_instance( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1095,6 +1331,9 @@ def delete_instance( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1203,6 +1442,9 @@ def create_cluster( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1292,6 +1534,9 @@ def get_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, @@ -1373,6 +1618,9 @@ def list_clusters( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1438,6 +1686,9 @@ def update_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1553,6 +1804,9 @@ def partial_update_cluster( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1634,6 +1888,9 @@ def delete_cluster( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1733,6 +1990,9 @@ def create_app_profile( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1813,6 +2073,9 @@ def get_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1898,6 +2161,9 @@ def list_app_profiles( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2000,6 +2266,9 @@ def update_app_profile( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2081,6 +2350,9 @@ def delete_app_profile( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
rpc( request, @@ -2184,6 +2456,9 @@ def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2289,6 +2564,9 @@ def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2376,6 +2654,9 @@ def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2459,6 +2740,9 @@ def list_hot_tablets( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py index d92d25453..aeb07556c 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py @@ -71,7 +71,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -134,6 +134,10 @@ def __init__( host += ":443" self._host = host + @property + def host(self): + return self._host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. 
self._wrapped_methods = { diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py index eca37957d..c47db6ba5 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py @@ -73,7 +73,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py index 145aa427d..cbd77b381 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py @@ -118,7 +118,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
These credentials identify the application to the service; if none diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py index 9d5502b7e..61f425953 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py @@ -35,9 +35,9 @@ import warnings try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin @@ -734,7 +734,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
These credentials identify the application to the service; if none diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py index 5a4435bde..124b3ef09 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py @@ -38,9 +38,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -67,8 +67,12 @@ class BigtableTableAdminAsyncClient: _client: BigtableTableAdminClient + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BigtableTableAdminClient._DEFAULT_UNIVERSE backup_path = staticmethod(BigtableTableAdminClient.backup_path) parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) @@ -189,6 +193,25 @@ def transport(self) -> BigtableTableAdminTransport: """ return self._client.transport + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. 
+ """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. + """ + return self._client._universe_domain + get_transport_class = functools.partial( type(BigtableTableAdminClient).get_transport_class, type(BigtableTableAdminClient), @@ -202,7 +225,7 @@ def __init__( client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable table admin client. + """Instantiates the bigtable table admin async client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -213,23 +236,38 @@ def __init__( transport (Union[str, ~.BigtableTableAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). 
However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. @@ -331,6 +369,9 @@ async def create_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -452,6 +493,9 @@ async def create_table_from_snapshot( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -550,6 +594,9 @@ async def list_tables( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. 
+ self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -648,6 +695,9 @@ async def get_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -751,6 +801,9 @@ async def update_table( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -831,6 +884,9 @@ async def delete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -911,6 +967,9 @@ async def undelete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1023,6 +1082,9 @@ async def modify_column_families( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1074,6 +1136,9 @@ async def drop_row_range( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1164,6 +1229,9 @@ async def generate_consistency_token( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1266,6 +1334,9 @@ async def check_consistency( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, @@ -1403,6 +1474,9 @@ async def snapshot_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1523,6 +1597,9 @@ async def get_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1639,6 +1716,9 @@ async def list_snapshots( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1735,6 +1815,9 @@ async def delete_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. await rpc( request, @@ -1844,6 +1927,9 @@ async def create_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1937,6 +2023,9 @@ async def get_backup( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2032,6 +2121,9 @@ async def update_backup( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2103,6 +2195,9 @@ async def delete_backup( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
await rpc( request, @@ -2194,6 +2289,9 @@ async def list_backups( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2267,6 +2365,9 @@ async def restore_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2402,6 +2503,9 @@ async def copy_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2529,6 +2633,9 @@ async def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2637,6 +2744,9 @@ async def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -2736,6 +2846,9 @@ async def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py index d0c04ed11..f97eefb44 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py @@ -28,6 +28,7 @@ Union, cast, ) +import warnings from google.cloud.bigtable_admin_v2 import gapic_version as package_version @@ -42,9 +43,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.api_core import operation # type: ignore from google.api_core import operation_async # type: ignore @@ -137,11 +138,15 @@ def _get_default_mtls_endpoint(api_endpoint): return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) + _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -405,7 +410,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): - """Return the API endpoint and client cert source for mutual TLS. + """Deprecated. Return the API endpoint and client cert source for mutual TLS. 
The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the @@ -435,6 +440,11 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") @@ -468,6 +478,175 @@ def get_mtls_endpoint_and_cert_source( return api_endpoint, client_cert_source + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. + """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. 
+ + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. + """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. 
+ + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = BigtableTableAdminClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. + + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + if credentials: + credentials_universe = credentials.universe_domain + if client_universe != credentials_universe: + default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. 
+ + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or BigtableTableAdminClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + def __init__( self, *, @@ -487,22 +666,32 @@ def __init__( transport (Union[str, BigtableTableAdminTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). 
However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. @@ -513,17 +702,34 @@ def __init__( google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. 
""" - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( - client_options + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = BigtableTableAdminClient._read_environment_variables() + self._client_cert_source = BigtableTableAdminClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = BigtableTableAdminClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False - api_key_value = getattr(client_options, "api_key", None) + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" @@ -532,20 +738,33 @@ def __init__( # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. 
- if isinstance(transport, BigtableTableAdminTransport): + transport_provided = isinstance(transport, BigtableTableAdminTransport) + if transport_provided: # transport is a BigtableTableAdminTransport instance. - if credentials or client_options.credentials_file or api_key_value: + if credentials or self._client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) - if client_options.scopes: + if self._client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) - self._transport = transport - else: + self._transport = cast(BigtableTableAdminTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = ( + self._api_endpoint + or BigtableTableAdminClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + ) + + if not transport_provided: import google.auth._default # type: ignore if api_key_value and hasattr( @@ -555,17 +774,17 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(transport) + Transport = type(self).get_transport_class(cast(str, transport)) self._transport = Transport( credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, - api_audience=client_options.api_audience, + api_audience=self._client_options.api_audience, ) def create_table( @@ -658,6 +877,9 @@ def create_table( 
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -781,6 +1003,9 @@ def create_table_from_snapshot( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -869,6 +1094,9 @@ def list_tables( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -957,6 +1185,9 @@ def get_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1060,6 +1291,9 @@ def update_table( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1140,6 +1374,9 @@ def delete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -1220,6 +1457,9 @@ def undelete_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1332,6 +1572,9 @@ def modify_column_families( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1384,6 +1627,9 @@ def drop_row_range( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
rpc( request, @@ -1468,6 +1714,9 @@ def generate_consistency_token( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1560,6 +1809,9 @@ def check_consistency( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1697,6 +1949,9 @@ def snapshot_table( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1807,6 +2062,9 @@ def get_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1913,6 +2171,9 @@ def list_snapshots( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2009,6 +2270,9 @@ def delete_snapshot( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -2118,6 +2382,9 @@ def create_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2201,6 +2468,9 @@ def get_backup( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2296,6 +2566,9 @@ def update_backup( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, @@ -2367,6 +2640,9 @@ def delete_backup( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. rpc( request, @@ -2448,6 +2724,9 @@ def list_backups( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2522,6 +2801,9 @@ def restore_table( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2657,6 +2939,9 @@ def copy_backup( gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2771,6 +3056,9 @@ def get_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2876,6 +3164,9 @@ def set_iam_policy( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -2963,6 +3254,9 @@ def test_iam_permissions( gapic_v1.routing_header.to_grpc_metadata((("resource", request.resource),)), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py index c3cf01a96..e0313a946 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py @@ -71,7 +71,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -134,6 +134,10 @@ def __init__( host += ":443" self._host = host + @property + def host(self): + return self._host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py index d765869cd..b0c33eca9 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py @@ -75,7 +75,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
These credentials identify the application to the service; if none diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py index b60a7351c..3ae66f84f 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py @@ -120,7 +120,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py index 41b893eb7..ad171d8f3 100644 --- a/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ b/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py @@ -35,9 +35,9 @@ import warnings try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.cloud.bigtable_admin_v2.types import bigtable_table_admin @@ -831,7 +831,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtableadmin.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
These credentials identify the application to the service; if none diff --git a/google/cloud/bigtable_v2/services/bigtable/async_client.py b/google/cloud/bigtable_v2/services/bigtable/async_client.py index 33686a4a8..c9510dedf 100644 --- a/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ b/google/cloud/bigtable_v2/services/bigtable/async_client.py @@ -40,9 +40,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object] # type: ignore + OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data @@ -59,8 +59,12 @@ class BigtableAsyncClient: _client: BigtableClient + # Copy defaults from the synchronous client for use here. + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. DEFAULT_ENDPOINT = BigtableClient.DEFAULT_ENDPOINT DEFAULT_MTLS_ENDPOINT = BigtableClient.DEFAULT_MTLS_ENDPOINT + _DEFAULT_ENDPOINT_TEMPLATE = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE + _DEFAULT_UNIVERSE = BigtableClient._DEFAULT_UNIVERSE instance_path = staticmethod(BigtableClient.instance_path) parse_instance_path = staticmethod(BigtableClient.parse_instance_path) @@ -161,6 +165,25 @@ def transport(self) -> BigtableTransport: """ return self._client.transport + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._client._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used + by the client instance. 
+ """ + return self._client._universe_domain + get_transport_class = functools.partial( type(BigtableClient).get_transport_class, type(BigtableClient) ) @@ -173,7 +196,7 @@ def __init__( client_options: Optional[ClientOptions] = None, client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, ) -> None: - """Instantiates the bigtable client. + """Instantiates the bigtable async client. Args: credentials (Optional[google.auth.credentials.Credentials]): The @@ -184,23 +207,38 @@ def __init__( transport (Union[str, ~.BigtableTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (ClientOptions): Custom options for the client. It - won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. 
If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + + client_info (google.api_core.gapic_v1.client_info.ClientInfo): + The client info used to send a user-agent string along with + API requests. If ``None``, then default info will be used. + Generally, you only need to set this if you're developing + your own client library. + Raises: google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport creation failed for any reason. @@ -297,6 +335,9 @@ def read_rows( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, @@ -392,6 +433,9 @@ def sample_row_keys( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, @@ -518,6 +562,9 @@ async def mutate_row( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -628,6 +675,9 @@ def mutate_rows( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, @@ -782,6 +832,9 @@ async def check_and_mutate_row( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. 
response = await rpc( request, @@ -874,6 +927,9 @@ async def ping_and_warm( gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -997,6 +1053,9 @@ async def read_modify_write_row( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = await rpc( request, @@ -1101,6 +1160,9 @@ def generate_initial_change_stream_partitions( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1199,6 +1261,9 @@ def read_change_stream( ), ) + # Validate the universe domain. + self._client._validate_universe_domain() + # Send the request. response = rpc( request, diff --git a/google/cloud/bigtable_v2/services/bigtable/client.py b/google/cloud/bigtable_v2/services/bigtable/client.py index db393faa7..0b8b2ab11 100644 --- a/google/cloud/bigtable_v2/services/bigtable/client.py +++ b/google/cloud/bigtable_v2/services/bigtable/client.py @@ -29,6 +29,7 @@ Union, cast, ) +import warnings from google.cloud.bigtable_v2 import gapic_version as package_version @@ -43,9 +44,9 @@ from google.oauth2 import service_account # type: ignore try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.cloud.bigtable_v2.types import bigtable from google.cloud.bigtable_v2.types import data @@ -126,11 +127,15 @@ def _get_default_mtls_endpoint(api_endpoint): return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") + # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
DEFAULT_ENDPOINT = "bigtable.googleapis.com" DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore DEFAULT_ENDPOINT ) + _DEFAULT_ENDPOINT_TEMPLATE = "bigtable.{UNIVERSE_DOMAIN}" + _DEFAULT_UNIVERSE = "googleapis.com" + @classmethod def from_service_account_info(cls, info: dict, *args, **kwargs): """Creates an instance of this client using the provided credentials @@ -298,7 +303,7 @@ def parse_common_location_path(path: str) -> Dict[str, str]: def get_mtls_endpoint_and_cert_source( cls, client_options: Optional[client_options_lib.ClientOptions] = None ): - """Return the API endpoint and client cert source for mutual TLS. + """Deprecated. Return the API endpoint and client cert source for mutual TLS. The client cert source is determined in the following order: (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the @@ -328,6 +333,11 @@ def get_mtls_endpoint_and_cert_source( Raises: google.auth.exceptions.MutualTLSChannelError: If any errors happen. """ + + warnings.warn( + "get_mtls_endpoint_and_cert_source is deprecated. Use the api_endpoint property instead.", + DeprecationWarning, + ) if client_options is None: client_options = client_options_lib.ClientOptions() use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") @@ -361,6 +371,175 @@ def get_mtls_endpoint_and_cert_source( return api_endpoint, client_cert_source + @staticmethod + def _read_environment_variables(): + """Returns the environment variables used by the client. + + Returns: + Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, + GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. + + Raises: + ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not + any of ["true", "false"]. + google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT + is not any of ["auto", "never", "always"]. 
+ """ + use_client_cert = os.getenv( + "GOOGLE_API_USE_CLIENT_CERTIFICATE", "false" + ).lower() + use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() + universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") + if use_client_cert not in ("true", "false"): + raise ValueError( + "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + if use_mtls_endpoint not in ("auto", "never", "always"): + raise MutualTLSChannelError( + "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + return use_client_cert == "true", use_mtls_endpoint, universe_domain_env + + def _get_client_cert_source(provided_cert_source, use_cert_flag): + """Return the client cert source to be used by the client. + + Args: + provided_cert_source (bytes): The client certificate source provided. + use_cert_flag (bool): A flag indicating whether to use the client certificate. + + Returns: + bytes or None: The client cert source to be used by the client. + """ + client_cert_source = None + if use_cert_flag: + if provided_cert_source: + client_cert_source = provided_cert_source + elif mtls.has_default_client_cert_source(): + client_cert_source = mtls.default_client_cert_source() + return client_cert_source + + def _get_api_endpoint( + api_override, client_cert_source, universe_domain, use_mtls_endpoint + ): + """Return the API endpoint used by the client. + + Args: + api_override (str): The API endpoint override. If specified, this is always + the return value of this function and the other arguments are not used. + client_cert_source (bytes): The client certificate source used by the client. + universe_domain (str): The universe domain used by the client. + use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. + Possible values are "always", "auto", or "never". + + Returns: + str: The API endpoint to be used by the client. 
+ """ + if api_override is not None: + api_endpoint = api_override + elif use_mtls_endpoint == "always" or ( + use_mtls_endpoint == "auto" and client_cert_source + ): + _default_universe = BigtableClient._DEFAULT_UNIVERSE + if universe_domain != _default_universe: + raise MutualTLSChannelError( + f"mTLS is not supported in any universe other than {_default_universe}." + ) + api_endpoint = BigtableClient.DEFAULT_MTLS_ENDPOINT + else: + api_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=universe_domain + ) + return api_endpoint + + @staticmethod + def _get_universe_domain( + client_universe_domain: Optional[str], universe_domain_env: Optional[str] + ) -> str: + """Return the universe domain used by the client. + + Args: + client_universe_domain (Optional[str]): The universe domain configured via the client options. + universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. + + Returns: + str: The universe domain to be used by the client. + + Raises: + ValueError: If the universe domain is an empty string. + """ + universe_domain = BigtableClient._DEFAULT_UNIVERSE + if client_universe_domain is not None: + universe_domain = client_universe_domain + elif universe_domain_env is not None: + universe_domain = universe_domain_env + if len(universe_domain.strip()) == 0: + raise ValueError("Universe Domain cannot be an empty string.") + return universe_domain + + @staticmethod + def _compare_universes( + client_universe: str, credentials: ga_credentials.Credentials + ) -> bool: + """Returns True iff the universe domains used by the client and credentials match. + + Args: + client_universe (str): The universe domain configured via the client options. + credentials (ga_credentials.Credentials): The credentials being used in the client. + + Returns: + bool: True iff client_universe matches the universe in credentials. 
+ + Raises: + ValueError: when client_universe does not match the universe in credentials. + """ + if credentials: + credentials_universe = credentials.universe_domain + if client_universe != credentials_universe: + default_universe = BigtableClient._DEFAULT_UNIVERSE + raise ValueError( + "The configured universe domain " + f"({client_universe}) does not match the universe domain " + f"found in the credentials ({credentials_universe}). " + "If you haven't configured the universe domain explicitly, " + f"`{default_universe}` is the default." + ) + return True + + def _validate_universe_domain(self): + """Validates client's and credentials' universe domains are consistent. + + Returns: + bool: True iff the configured universe domain is valid. + + Raises: + ValueError: If the configured universe domain is not valid. + """ + self._is_universe_domain_valid = ( + self._is_universe_domain_valid + or BigtableClient._compare_universes( + self.universe_domain, self.transport._credentials + ) + ) + return self._is_universe_domain_valid + + @property + def api_endpoint(self): + """Return the API endpoint used by the client instance. + + Returns: + str: The API endpoint used by the client instance. + """ + return self._api_endpoint + + @property + def universe_domain(self) -> str: + """Return the universe domain used by the client instance. + + Returns: + str: The universe domain used by the client instance. + """ + return self._universe_domain + def __init__( self, *, @@ -380,22 +559,32 @@ def __init__( transport (Union[str, BigtableTransport]): The transport to use. If set to None, a transport is chosen automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): Custom options for the - client. It won't take effect if a ``transport`` instance is provided. - (1) The ``api_endpoint`` property can be used to override the - default endpoint provided by the client. 
GOOGLE_API_USE_MTLS_ENDPOINT - environment variable can also be used to override the endpoint: + client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): + Custom options for the client. + + 1. The ``api_endpoint`` property can be used to override the + default endpoint provided by the client when ``transport`` is + not explicitly provided. Only if this property is not set and + ``transport`` was not explicitly provided, the endpoint is + determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment + variable, which have one of the following values: "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto switch to the - default mTLS endpoint if client certificate is present, this is - the default value). However, the ``api_endpoint`` property takes - precedence if provided. - (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable + use the default regular endpoint) and "auto" (auto-switch to the + default mTLS endpoint if client certificate is present; this is + the default value). + + 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable is "true", then the ``client_cert_source`` property can be used - to provide client certificate for mutual TLS transport. If + to provide a client certificate for mTLS transport. If not provided, the default SSL client certificate will be used if present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not set, no client certificate will be used. + + 3. The ``universe_domain`` property can be used to override the + default "googleapis.com" universe. Note that the ``api_endpoint`` + property still takes precedence; and ``universe_domain`` is + currently not supported for mTLS. + client_info (google.api_core.gapic_v1.client_info.ClientInfo): The client info used to send a user-agent string along with API requests. If ``None``, then default info will be used. 
@@ -406,17 +595,34 @@ def __init__( google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport creation failed for any reason. """ - if isinstance(client_options, dict): - client_options = client_options_lib.from_dict(client_options) - if client_options is None: - client_options = client_options_lib.ClientOptions() - client_options = cast(client_options_lib.ClientOptions, client_options) + self._client_options = client_options + if isinstance(self._client_options, dict): + self._client_options = client_options_lib.from_dict(self._client_options) + if self._client_options is None: + self._client_options = client_options_lib.ClientOptions() + self._client_options = cast( + client_options_lib.ClientOptions, self._client_options + ) - api_endpoint, client_cert_source_func = self.get_mtls_endpoint_and_cert_source( - client_options + universe_domain_opt = getattr(self._client_options, "universe_domain", None) + + ( + self._use_client_cert, + self._use_mtls_endpoint, + self._universe_domain_env, + ) = BigtableClient._read_environment_variables() + self._client_cert_source = BigtableClient._get_client_cert_source( + self._client_options.client_cert_source, self._use_client_cert + ) + self._universe_domain = BigtableClient._get_universe_domain( + universe_domain_opt, self._universe_domain_env ) + self._api_endpoint = None # updated below, depending on `transport` + + # Initialize the universe domain validation. + self._is_universe_domain_valid = False - api_key_value = getattr(client_options, "api_key", None) + api_key_value = getattr(self._client_options, "api_key", None) if api_key_value and credentials: raise ValueError( "client_options.api_key and credentials are mutually exclusive" @@ -425,20 +631,30 @@ def __init__( # Save or instantiate the transport. # Ordinarily, we provide the transport, but allowing a custom transport # instance provides an extensibility point for unusual situations. 
- if isinstance(transport, BigtableTransport): + transport_provided = isinstance(transport, BigtableTransport) + if transport_provided: # transport is a BigtableTransport instance. - if credentials or client_options.credentials_file or api_key_value: + if credentials or self._client_options.credentials_file or api_key_value: raise ValueError( "When providing a transport instance, " "provide its credentials directly." ) - if client_options.scopes: + if self._client_options.scopes: raise ValueError( "When providing a transport instance, provide its scopes " "directly." ) - self._transport = transport - else: + self._transport = cast(BigtableTransport, transport) + self._api_endpoint = self._transport.host + + self._api_endpoint = self._api_endpoint or BigtableClient._get_api_endpoint( + self._client_options.api_endpoint, + self._client_cert_source, + self._universe_domain, + self._use_mtls_endpoint, + ) + + if not transport_provided: import google.auth._default # type: ignore if api_key_value and hasattr( @@ -448,17 +664,17 @@ def __init__( api_key_value ) - Transport = type(self).get_transport_class(transport) + Transport = type(self).get_transport_class(cast(str, transport)) self._transport = Transport( credentials=credentials, - credentials_file=client_options.credentials_file, - host=api_endpoint, - scopes=client_options.scopes, - client_cert_source_for_mtls=client_cert_source_func, - quota_project_id=client_options.quota_project_id, + credentials_file=self._client_options.credentials_file, + host=self._api_endpoint, + scopes=self._client_options.scopes, + client_cert_source_for_mtls=self._client_cert_source, + quota_project_id=self._client_options.quota_project_id, client_info=client_info, always_use_jwt_access=True, - api_audience=client_options.api_audience, + api_audience=self._client_options.api_audience, ) def read_rows( @@ -555,6 +771,9 @@ def read_rows( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. 
+ self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -659,6 +878,9 @@ def sample_row_keys( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -784,6 +1006,9 @@ def mutate_row( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -903,6 +1128,9 @@ def mutate_rows( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1066,6 +1294,9 @@ def check_and_mutate_row( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1167,6 +1398,9 @@ def ping_and_warm( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1299,6 +1533,9 @@ def read_modify_write_row( gapic_v1.routing_header.to_grpc_metadata(header_params), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1405,6 +1642,9 @@ def generate_initial_change_stream_partitions( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. response = rpc( request, @@ -1503,6 +1743,9 @@ def read_change_stream( ), ) + # Validate the universe domain. + self._validate_universe_domain() + # Send the request. 
response = rpc( request, diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/google/cloud/bigtable_v2/services/bigtable/transports/base.py index b580bbca7..7d1475eb9 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/base.py @@ -64,7 +64,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none @@ -127,6 +127,10 @@ def __init__( host += ":443" self._host = host + @property + def host(self): + return self._host + def _prep_wrapped_messages(self, client_info): # Precompute the wrapped methods. self._wrapped_methods = { diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py index 8ba04e761..bec9c85f1 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py @@ -65,7 +65,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py index 2c0cbdad6..f6890d280 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py @@ -110,7 +110,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. 
+ The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. These credentials identify the application to the service; if none diff --git a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py index 31d230f94..17b47cb1c 100644 --- a/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ b/google/cloud/bigtable_v2/services/bigtable/transports/rest.py @@ -34,9 +34,9 @@ import warnings try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault] + OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object] # type: ignore + OptionalRetry = Union[retries.Retry, object, None] # type: ignore from google.cloud.bigtable_v2.types import bigtable @@ -386,7 +386,7 @@ def __init__( Args: host (Optional[str]): - The hostname to connect to. + The hostname to connect to (default: 'bigtable.googleapis.com'). credentials (Optional[google.auth.credentials.Credentials]): The authorization credentials to attach to requests. 
These credentials identify the application to the service; if none diff --git a/owl-bot-staging/bigtable/v2/.coveragerc b/owl-bot-staging/bigtable/v2/.coveragerc deleted file mode 100644 index 9b4f2d9d5..000000000 --- a/owl-bot-staging/bigtable/v2/.coveragerc +++ /dev/null @@ -1,13 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/bigtable/__init__.py - google/cloud/bigtable/gapic_version.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ diff --git a/owl-bot-staging/bigtable/v2/.flake8 b/owl-bot-staging/bigtable/v2/.flake8 deleted file mode 100644 index 29227d4cf..000000000 --- a/owl-bot-staging/bigtable/v2/.flake8 +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by synthtool. DO NOT EDIT! -[flake8] -ignore = E203, E266, E501, W503 -exclude = - # Exclude generated code. - **/proto/** - **/gapic/** - **/services/** - **/types/** - *_pb2.py - - # Standard linting exemptions. 
- **/.nox/** - __pycache__, - .git, - *.pyc, - conf.py diff --git a/owl-bot-staging/bigtable/v2/MANIFEST.in b/owl-bot-staging/bigtable/v2/MANIFEST.in deleted file mode 100644 index 9a3ef5517..000000000 --- a/owl-bot-staging/bigtable/v2/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/bigtable *.py -recursive-include google/cloud/bigtable_v2 *.py diff --git a/owl-bot-staging/bigtable/v2/README.rst b/owl-bot-staging/bigtable/v2/README.rst deleted file mode 100644 index d36a3eb74..000000000 --- a/owl-bot-staging/bigtable/v2/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Bigtable API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Bigtable API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. _Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv - source /bin/activate - /bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. 
code-block:: console - - python3 -m venv - \Scripts\activate - \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/bigtable/v2/docs/_static/custom.css b/owl-bot-staging/bigtable/v2/docs/_static/custom.css deleted file mode 100644 index 06423be0b..000000000 --- a/owl-bot-staging/bigtable/v2/docs/_static/custom.css +++ /dev/null @@ -1,3 +0,0 @@ -dl.field-list > dt { - min-width: 100px -} diff --git a/owl-bot-staging/bigtable/v2/docs/bigtable_v2/bigtable.rst b/owl-bot-staging/bigtable/v2/docs/bigtable_v2/bigtable.rst deleted file mode 100644 index 9f92e0fee..000000000 --- a/owl-bot-staging/bigtable/v2/docs/bigtable_v2/bigtable.rst +++ /dev/null @@ -1,6 +0,0 @@ -Bigtable --------------------------- - -.. automodule:: google.cloud.bigtable_v2.services.bigtable - :members: - :inherited-members: diff --git a/owl-bot-staging/bigtable/v2/docs/bigtable_v2/services_.rst b/owl-bot-staging/bigtable/v2/docs/bigtable_v2/services_.rst deleted file mode 100644 index 1de472763..000000000 --- a/owl-bot-staging/bigtable/v2/docs/bigtable_v2/services_.rst +++ /dev/null @@ -1,6 +0,0 @@ -Services for Google Cloud Bigtable v2 API -========================================= -.. toctree:: - :maxdepth: 2 - - bigtable diff --git a/owl-bot-staging/bigtable/v2/docs/bigtable_v2/types_.rst b/owl-bot-staging/bigtable/v2/docs/bigtable_v2/types_.rst deleted file mode 100644 index 56a8941a2..000000000 --- a/owl-bot-staging/bigtable/v2/docs/bigtable_v2/types_.rst +++ /dev/null @@ -1,6 +0,0 @@ -Types for Google Cloud Bigtable v2 API -====================================== - -.. 
automodule:: google.cloud.bigtable_v2.types - :members: - :show-inheritance: diff --git a/owl-bot-staging/bigtable/v2/docs/conf.py b/owl-bot-staging/bigtable/v2/docs/conf.py deleted file mode 100644 index 79859d39b..000000000 --- a/owl-bot-staging/bigtable/v2/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-bigtable documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "4.0.1" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. 
-extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The root toctree document. -root_doc = "index" - -# General information about the project. -project = u"google-cloud-bigtable" -copyright = u"2023, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = 'en' - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. -# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. 
-exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. 
-# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
-# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-bigtable-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. 
List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - root_doc, - "google-cloud-bigtable.tex", - u"google-cloud-bigtable Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - root_doc, - "google-cloud-bigtable", - u"Google Cloud Bigtable Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - root_doc, - "google-cloud-bigtable", - u"google-cloud-bigtable Documentation", - author, - "google-cloud-bigtable", - "GAPIC library for Google Cloud Bigtable API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. 
-# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/bigtable/v2/docs/index.rst b/owl-bot-staging/bigtable/v2/docs/index.rst deleted file mode 100644 index 10a273382..000000000 --- a/owl-bot-staging/bigtable/v2/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. 
toctree:: - :maxdepth: 2 - - bigtable_v2/services - bigtable_v2/types diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable/__init__.py deleted file mode 100644 index d36716cc2..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable/__init__.py +++ /dev/null @@ -1,107 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from google.cloud.bigtable import gapic_version as package_version - -__version__ = package_version.__version__ - - -from google.cloud.bigtable_v2.services.bigtable.client import BigtableClient -from google.cloud.bigtable_v2.services.bigtable.async_client import BigtableAsyncClient - -from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowRequest -from google.cloud.bigtable_v2.types.bigtable import CheckAndMutateRowResponse -from google.cloud.bigtable_v2.types.bigtable import GenerateInitialChangeStreamPartitionsRequest -from google.cloud.bigtable_v2.types.bigtable import GenerateInitialChangeStreamPartitionsResponse -from google.cloud.bigtable_v2.types.bigtable import MutateRowRequest -from google.cloud.bigtable_v2.types.bigtable import MutateRowResponse -from google.cloud.bigtable_v2.types.bigtable import MutateRowsRequest -from google.cloud.bigtable_v2.types.bigtable import MutateRowsResponse -from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest -from 
google.cloud.bigtable_v2.types.bigtable import PingAndWarmResponse -from google.cloud.bigtable_v2.types.bigtable import RateLimitInfo -from google.cloud.bigtable_v2.types.bigtable import ReadChangeStreamRequest -from google.cloud.bigtable_v2.types.bigtable import ReadChangeStreamResponse -from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowRequest -from google.cloud.bigtable_v2.types.bigtable import ReadModifyWriteRowResponse -from google.cloud.bigtable_v2.types.bigtable import ReadRowsRequest -from google.cloud.bigtable_v2.types.bigtable import ReadRowsResponse -from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysRequest -from google.cloud.bigtable_v2.types.bigtable import SampleRowKeysResponse -from google.cloud.bigtable_v2.types.data import Cell -from google.cloud.bigtable_v2.types.data import Column -from google.cloud.bigtable_v2.types.data import ColumnRange -from google.cloud.bigtable_v2.types.data import Family -from google.cloud.bigtable_v2.types.data import Mutation -from google.cloud.bigtable_v2.types.data import ReadModifyWriteRule -from google.cloud.bigtable_v2.types.data import Row -from google.cloud.bigtable_v2.types.data import RowFilter -from google.cloud.bigtable_v2.types.data import RowRange -from google.cloud.bigtable_v2.types.data import RowSet -from google.cloud.bigtable_v2.types.data import StreamContinuationToken -from google.cloud.bigtable_v2.types.data import StreamContinuationTokens -from google.cloud.bigtable_v2.types.data import StreamPartition -from google.cloud.bigtable_v2.types.data import TimestampRange -from google.cloud.bigtable_v2.types.data import ValueRange -from google.cloud.bigtable_v2.types.feature_flags import FeatureFlags -from google.cloud.bigtable_v2.types.request_stats import FullReadStatsView -from google.cloud.bigtable_v2.types.request_stats import ReadIterationStats -from google.cloud.bigtable_v2.types.request_stats import RequestLatencyStats -from 
google.cloud.bigtable_v2.types.request_stats import RequestStats -from google.cloud.bigtable_v2.types.response_params import ResponseParams - -__all__ = ('BigtableClient', - 'BigtableAsyncClient', - 'CheckAndMutateRowRequest', - 'CheckAndMutateRowResponse', - 'GenerateInitialChangeStreamPartitionsRequest', - 'GenerateInitialChangeStreamPartitionsResponse', - 'MutateRowRequest', - 'MutateRowResponse', - 'MutateRowsRequest', - 'MutateRowsResponse', - 'PingAndWarmRequest', - 'PingAndWarmResponse', - 'RateLimitInfo', - 'ReadChangeStreamRequest', - 'ReadChangeStreamResponse', - 'ReadModifyWriteRowRequest', - 'ReadModifyWriteRowResponse', - 'ReadRowsRequest', - 'ReadRowsResponse', - 'SampleRowKeysRequest', - 'SampleRowKeysResponse', - 'Cell', - 'Column', - 'ColumnRange', - 'Family', - 'Mutation', - 'ReadModifyWriteRule', - 'Row', - 'RowFilter', - 'RowRange', - 'RowSet', - 'StreamContinuationToken', - 'StreamContinuationTokens', - 'StreamPartition', - 'TimestampRange', - 'ValueRange', - 'FeatureFlags', - 'FullReadStatsView', - 'ReadIterationStats', - 'RequestLatencyStats', - 'RequestStats', - 'ResponseParams', -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable/gapic_version.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable/gapic_version.py deleted file mode 100644 index 360a0d13e..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable/gapic_version.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable/py.typed b/owl-bot-staging/bigtable/v2/google/cloud/bigtable/py.typed deleted file mode 100644 index 889d34043..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-bigtable package uses inline types. diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/__init__.py deleted file mode 100644 index 7383df7c5..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/__init__.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from google.cloud.bigtable_v2 import gapic_version as package_version - -__version__ = package_version.__version__ - - -from .services.bigtable import BigtableClient -from .services.bigtable import BigtableAsyncClient - -from .types.bigtable import CheckAndMutateRowRequest -from .types.bigtable import CheckAndMutateRowResponse -from .types.bigtable import GenerateInitialChangeStreamPartitionsRequest -from .types.bigtable import GenerateInitialChangeStreamPartitionsResponse -from .types.bigtable import MutateRowRequest -from .types.bigtable import MutateRowResponse -from .types.bigtable import MutateRowsRequest -from .types.bigtable import MutateRowsResponse -from .types.bigtable import PingAndWarmRequest -from .types.bigtable import PingAndWarmResponse -from .types.bigtable import RateLimitInfo -from .types.bigtable import ReadChangeStreamRequest -from .types.bigtable import ReadChangeStreamResponse -from .types.bigtable import ReadModifyWriteRowRequest -from .types.bigtable import ReadModifyWriteRowResponse -from .types.bigtable import ReadRowsRequest -from .types.bigtable import ReadRowsResponse -from .types.bigtable import SampleRowKeysRequest -from .types.bigtable import SampleRowKeysResponse -from .types.data import Cell -from .types.data import Column -from .types.data import ColumnRange -from .types.data import Family -from .types.data import Mutation -from .types.data import ReadModifyWriteRule -from .types.data import Row -from .types.data import RowFilter -from .types.data import RowRange -from .types.data import RowSet -from .types.data import StreamContinuationToken -from .types.data import StreamContinuationTokens -from .types.data import StreamPartition -from .types.data import TimestampRange -from .types.data import ValueRange -from .types.feature_flags import FeatureFlags -from .types.request_stats import FullReadStatsView -from .types.request_stats import ReadIterationStats -from .types.request_stats import RequestLatencyStats -from 
.types.request_stats import RequestStats -from .types.response_params import ResponseParams - -__all__ = ( - 'BigtableAsyncClient', -'BigtableClient', -'Cell', -'CheckAndMutateRowRequest', -'CheckAndMutateRowResponse', -'Column', -'ColumnRange', -'Family', -'FeatureFlags', -'FullReadStatsView', -'GenerateInitialChangeStreamPartitionsRequest', -'GenerateInitialChangeStreamPartitionsResponse', -'MutateRowRequest', -'MutateRowResponse', -'MutateRowsRequest', -'MutateRowsResponse', -'Mutation', -'PingAndWarmRequest', -'PingAndWarmResponse', -'RateLimitInfo', -'ReadChangeStreamRequest', -'ReadChangeStreamResponse', -'ReadIterationStats', -'ReadModifyWriteRowRequest', -'ReadModifyWriteRowResponse', -'ReadModifyWriteRule', -'ReadRowsRequest', -'ReadRowsResponse', -'RequestLatencyStats', -'RequestStats', -'ResponseParams', -'Row', -'RowFilter', -'RowRange', -'RowSet', -'SampleRowKeysRequest', -'SampleRowKeysResponse', -'StreamContinuationToken', -'StreamContinuationTokens', -'StreamPartition', -'TimestampRange', -'ValueRange', -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_metadata.json b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_metadata.json deleted file mode 100644 index 181dc8ff5..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_metadata.json +++ /dev/null @@ -1,163 +0,0 @@ - { - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.bigtable_v2", - "protoPackage": "google.bigtable.v2", - "schema": "1.0", - "services": { - "Bigtable": { - "clients": { - "grpc": { - "libraryClient": "BigtableClient", - "rpcs": { - "CheckAndMutateRow": { - "methods": [ - "check_and_mutate_row" - ] - }, - "GenerateInitialChangeStreamPartitions": { - "methods": [ - "generate_initial_change_stream_partitions" - ] - }, - "MutateRow": { - "methods": [ - "mutate_row" - ] - }, - "MutateRows": { - "methods": [ - "mutate_rows" - ] 
- }, - "PingAndWarm": { - "methods": [ - "ping_and_warm" - ] - }, - "ReadChangeStream": { - "methods": [ - "read_change_stream" - ] - }, - "ReadModifyWriteRow": { - "methods": [ - "read_modify_write_row" - ] - }, - "ReadRows": { - "methods": [ - "read_rows" - ] - }, - "SampleRowKeys": { - "methods": [ - "sample_row_keys" - ] - } - } - }, - "grpc-async": { - "libraryClient": "BigtableAsyncClient", - "rpcs": { - "CheckAndMutateRow": { - "methods": [ - "check_and_mutate_row" - ] - }, - "GenerateInitialChangeStreamPartitions": { - "methods": [ - "generate_initial_change_stream_partitions" - ] - }, - "MutateRow": { - "methods": [ - "mutate_row" - ] - }, - "MutateRows": { - "methods": [ - "mutate_rows" - ] - }, - "PingAndWarm": { - "methods": [ - "ping_and_warm" - ] - }, - "ReadChangeStream": { - "methods": [ - "read_change_stream" - ] - }, - "ReadModifyWriteRow": { - "methods": [ - "read_modify_write_row" - ] - }, - "ReadRows": { - "methods": [ - "read_rows" - ] - }, - "SampleRowKeys": { - "methods": [ - "sample_row_keys" - ] - } - } - }, - "rest": { - "libraryClient": "BigtableClient", - "rpcs": { - "CheckAndMutateRow": { - "methods": [ - "check_and_mutate_row" - ] - }, - "GenerateInitialChangeStreamPartitions": { - "methods": [ - "generate_initial_change_stream_partitions" - ] - }, - "MutateRow": { - "methods": [ - "mutate_row" - ] - }, - "MutateRows": { - "methods": [ - "mutate_rows" - ] - }, - "PingAndWarm": { - "methods": [ - "ping_and_warm" - ] - }, - "ReadChangeStream": { - "methods": [ - "read_change_stream" - ] - }, - "ReadModifyWriteRow": { - "methods": [ - "read_modify_write_row" - ] - }, - "ReadRows": { - "methods": [ - "read_rows" - ] - }, - "SampleRowKeys": { - "methods": [ - "sample_row_keys" - ] - } - } - } - } - } - } -} diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_version.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_version.py deleted file mode 100644 index 360a0d13e..000000000 --- 
a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/gapic_version.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/py.typed b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/py.typed deleted file mode 100644 index 889d34043..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-bigtable package uses inline types. diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/__init__.py deleted file mode 100644 index 89a37dc92..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/__init__.py deleted file mode 100644 index 749087607..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .client import BigtableClient -from .async_client import BigtableAsyncClient - -__all__ = ( - 'BigtableClient', - 'BigtableAsyncClient', -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/async_client.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/async_client.py deleted file mode 100644 index 468fa2557..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/async_client.py +++ /dev/null @@ -1,1225 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -import functools -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, AsyncIterable, Awaitable, Sequence, Tuple, Type, Union - -from google.cloud.bigtable_v2 import gapic_version as package_version - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry_async as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore - -from google.cloud.bigtable_v2.types import bigtable -from google.cloud.bigtable_v2.types import data -from google.cloud.bigtable_v2.types import request_stats -from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport -from .client import BigtableClient - - -class BigtableAsyncClient: - """Service for reading from and writing to existing Bigtable - tables. - """ - - _client: BigtableClient - - # Copy defaults from the synchronous client for use here. - # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
- DEFAULT_ENDPOINT = BigtableClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = BigtableClient.DEFAULT_MTLS_ENDPOINT - _DEFAULT_ENDPOINT_TEMPLATE = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE - _DEFAULT_UNIVERSE = BigtableClient._DEFAULT_UNIVERSE - - instance_path = staticmethod(BigtableClient.instance_path) - parse_instance_path = staticmethod(BigtableClient.parse_instance_path) - table_path = staticmethod(BigtableClient.table_path) - parse_table_path = staticmethod(BigtableClient.parse_table_path) - common_billing_account_path = staticmethod(BigtableClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(BigtableClient.parse_common_billing_account_path) - common_folder_path = staticmethod(BigtableClient.common_folder_path) - parse_common_folder_path = staticmethod(BigtableClient.parse_common_folder_path) - common_organization_path = staticmethod(BigtableClient.common_organization_path) - parse_common_organization_path = staticmethod(BigtableClient.parse_common_organization_path) - common_project_path = staticmethod(BigtableClient.common_project_path) - parse_common_project_path = staticmethod(BigtableClient.parse_common_project_path) - common_location_path = staticmethod(BigtableClient.common_location_path) - parse_common_location_path = staticmethod(BigtableClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableAsyncClient: The constructed client. 
- """ - return BigtableClient.from_service_account_info.__func__(BigtableAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableAsyncClient: The constructed client. - """ - return BigtableClient.from_service_account_file.__func__(BigtableAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. 
- - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - return BigtableClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore - - @property - def transport(self) -> BigtableTransport: - """Returns the transport used by the client instance. - - Returns: - BigtableTransport: The transport used by the client instance. - """ - return self._client.transport - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._client._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. - - Returns: - str: The universe domain used - by the client instance. - """ - return self._client._universe_domain - - get_transport_class = functools.partial(type(BigtableClient).get_transport_class, type(BigtableClient)) - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableTransport] = "grpc_asyncio", - client_options: Optional[ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the bigtable async client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.BigtableTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. 
The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client = BigtableClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - def read_rows(self, - request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, - *, - table_name: Optional[str] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[bigtable.ReadRowsResponse]]: - r"""Streams back the contents of all requested rows in - key order, optionally applying the same Reader filter to - each. Depending on their size, rows and cells may be - broken up across multiple responses, but atomicity of - each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Args: - request (Optional[Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]]): - The request object. Request message for - Bigtable.ReadRows. - table_name (:class:`str`): - Required. The unique name of the table from which to - read. Values are of the form - ``projects//instances//tables/``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: - Response message for - Bigtable.ReadRows. 
- - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable.ReadRowsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_rows, - default_timeout=43200.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def sample_row_keys(self, - request: Optional[Union[bigtable.SampleRowKeysRequest, dict]] = None, - *, - table_name: Optional[str] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[bigtable.SampleRowKeysResponse]]: - r"""Returns a sample of row keys in the table. 
The - returned row keys will delimit contiguous sections of - the table of approximately equal size, which can be used - to break up the data for distributed tasks like - mapreduces. - - Args: - request (Optional[Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]]): - The request object. Request message for - Bigtable.SampleRowKeys. - table_name (:class:`str`): - Required. The unique name of the table from which to - sample row keys. Values are of the form - ``projects//instances//tables/
``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: - Response message for - Bigtable.SampleRowKeys. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable.SampleRowKeysRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.sample_row_keys, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def mutate_row(self, - request: Optional[Union[bigtable.MutateRowRequest, dict]] = None, - *, - table_name: Optional[str] = None, - row_key: Optional[bytes] = None, - mutations: Optional[MutableSequence[data.Mutation]] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable.MutateRowResponse: - r"""Mutates a row atomically. Cells already present in the row are - left unchanged unless explicitly changed by ``mutation``. - - Args: - request (Optional[Union[google.cloud.bigtable_v2.types.MutateRowRequest, dict]]): - The request object. Request message for - Bigtable.MutateRow. - table_name (:class:`str`): - Required. The unique name of the table to which the - mutation should be applied. Values are of the form - ``projects//instances//tables/
``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - row_key (:class:`bytes`): - Required. The key of the row to which - the mutation should be applied. - - This corresponds to the ``row_key`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - mutations (:class:`MutableSequence[google.cloud.bigtable_v2.types.Mutation]`): - Required. Changes to be atomically - applied to the specified row. Entries - are applied in order, meaning that - earlier mutations can be masked by later - ones. Must contain at least one entry - and at most 100000. - - This corresponds to the ``mutations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_v2.types.MutateRowResponse: - Response message for - Bigtable.MutateRow. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable.MutateRowRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if row_key is not None: - request.row_key = row_key - if app_profile_id is not None: - request.app_profile_id = app_profile_id - if mutations: - request.mutations.extend(mutations) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.mutate_row, - default_retry=retries.AsyncRetry( -initial=0.01,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def mutate_rows(self, - request: Optional[Union[bigtable.MutateRowsRequest, dict]] = None, - *, - table_name: Optional[str] = None, - entries: Optional[MutableSequence[bigtable.MutateRowsRequest.Entry]] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[bigtable.MutateRowsResponse]]: - r"""Mutates multiple rows in a batch. Each individual row - is mutated atomically as in MutateRow, but the entire - batch is not executed atomically. - - Args: - request (Optional[Union[google.cloud.bigtable_v2.types.MutateRowsRequest, dict]]): - The request object. Request message for - BigtableService.MutateRows. - table_name (:class:`str`): - Required. The unique name of the - table to which the mutations should be - applied. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entries (:class:`MutableSequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]`): - Required. The row keys and - corresponding mutations to be applied in - bulk. Each entry is applied as an atomic - mutation, but the entries may be applied - in arbitrary order (even between entries - for the same row). At least one entry - must be specified, and in total the - entries can contain at most 100000 - mutations. - - This corresponds to the ``entries`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: - Response message for - BigtableService.MutateRows. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, entries, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable.MutateRowsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - if entries: - request.entries.extend(entries) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.mutate_rows, - default_timeout=600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def check_and_mutate_row(self, - request: Optional[Union[bigtable.CheckAndMutateRowRequest, dict]] = None, - *, - table_name: Optional[str] = None, - row_key: Optional[bytes] = None, - predicate_filter: Optional[data.RowFilter] = None, - true_mutations: Optional[MutableSequence[data.Mutation]] = None, - false_mutations: Optional[MutableSequence[data.Mutation]] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable.CheckAndMutateRowResponse: - r"""Mutates a row atomically based on the output of a - predicate Reader filter. - - Args: - request (Optional[Union[google.cloud.bigtable_v2.types.CheckAndMutateRowRequest, dict]]): - The request object. Request message for - Bigtable.CheckAndMutateRow. - table_name (:class:`str`): - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of - the form - ``projects//instances//tables/
``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - row_key (:class:`bytes`): - Required. The key of the row to which - the conditional mutation should be - applied. - - This corresponds to the ``row_key`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - predicate_filter (:class:`google.cloud.bigtable_v2.types.RowFilter`): - The filter to be applied to the contents of the - specified row. Depending on whether or not any results - are yielded, either ``true_mutations`` or - ``false_mutations`` will be executed. If unset, checks - that the row contains any values at all. - - This corresponds to the ``predicate_filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - true_mutations (:class:`MutableSequence[google.cloud.bigtable_v2.types.Mutation]`): - Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when - applied to ``row_key``. Entries are applied in order, - meaning that earlier mutations can be masked by later - ones. Must contain at least one entry if - ``false_mutations`` is empty, and at most 100000. - - This corresponds to the ``true_mutations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - false_mutations (:class:`MutableSequence[google.cloud.bigtable_v2.types.Mutation]`): - Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when - applied to ``row_key``. Entries are applied in order, - meaning that earlier mutations can be masked by later - ones. Must contain at least one entry if - ``true_mutations`` is empty, and at most 100000. - - This corresponds to the ``false_mutations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: - Response message for - Bigtable.CheckAndMutateRow. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, predicate_filter, true_mutations, false_mutations, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable.CheckAndMutateRowRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if row_key is not None: - request.row_key = row_key - if predicate_filter is not None: - request.predicate_filter = predicate_filter - if app_profile_id is not None: - request.app_profile_id = app_profile_id - if true_mutations: - request.true_mutations.extend(true_mutations) - if false_mutations: - request.false_mutations.extend(false_mutations) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.check_and_mutate_row, - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def ping_and_warm(self, - request: Optional[Union[bigtable.PingAndWarmRequest, dict]] = None, - *, - name: Optional[str] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable.PingAndWarmResponse: - r"""Warm up associated instance metadata for this - connection. This call is not required but may be useful - for connection keep-alive. - - Args: - request (Optional[Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]]): - The request object. Request message for client connection - keep-alive and warming. - name (:class:`str`): - Required. The unique name of the instance to check - permissions for as well as respond. Values are of the - form ``projects//instances/``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_v2.types.PingAndWarmResponse: - Response message for - Bigtable.PingAndWarm connection - keepalive and warming. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable.PingAndWarmRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.ping_and_warm, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def read_modify_write_row(self, - request: Optional[Union[bigtable.ReadModifyWriteRowRequest, dict]] = None, - *, - table_name: Optional[str] = None, - row_key: Optional[bytes] = None, - rules: Optional[MutableSequence[data.ReadModifyWriteRule]] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable.ReadModifyWriteRowResponse: - r"""Modifies a row atomically on the server. The method - reads the latest existing timestamp and value from the - specified columns and writes a new entry based on - pre-defined read/modify/write rules. The new value for - the timestamp is the greater of the existing timestamp - or the current server time. The method returns the new - contents of all modified cells. - - Args: - request (Optional[Union[google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest, dict]]): - The request object. Request message for - Bigtable.ReadModifyWriteRow. - table_name (:class:`str`): - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of - the form - ``projects//instances//tables/
``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - row_key (:class:`bytes`): - Required. The key of the row to which - the read/modify/write rules should be - applied. - - This corresponds to the ``row_key`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - rules (:class:`MutableSequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]`): - Required. Rules specifying how the - specified row's contents are to be - transformed into writes. Entries are - applied in order, meaning that earlier - rules will affect the results of later - ones. - - This corresponds to the ``rules`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: - Response message for - Bigtable.ReadModifyWriteRow. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([table_name, row_key, rules, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable.ReadModifyWriteRowRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if row_key is not None: - request.row_key = row_key - if app_profile_id is not None: - request.app_profile_id = app_profile_id - if rules: - request.rules.extend(rules) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_modify_write_row, - default_timeout=20.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def generate_initial_change_stream_partitions(self, - request: Optional[Union[bigtable.GenerateInitialChangeStreamPartitionsRequest, dict]] = None, - *, - table_name: Optional[str] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]]: - r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. - Returns the current list of partitions that make up the table's - change stream. 
The union of partitions will cover the entire - keyspace. Partitions can be read with ``ReadChangeStream``. - - Args: - request (Optional[Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]]): - The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for - Bigtable.GenerateInitialChangeStreamPartitions. - table_name (:class:`str`): - Required. The unique name of the table from which to get - change stream partitions. Values are of the form - ``projects//instances//tables/
``. - Change streaming must be enabled on the table. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. Single cluster routing must be - configured on the profile. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: - NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for - Bigtable.GenerateInitialChangeStreamPartitions. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.generate_initial_change_stream_partitions, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def read_change_stream(self, - request: Optional[Union[bigtable.ReadChangeStreamRequest, dict]] = None, - *, - table_name: Optional[str] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Awaitable[AsyncIterable[bigtable.ReadChangeStreamResponse]]: - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. - - Args: - request (Optional[Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]]): - The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for Bigtable.ReadChangeStream. - table_name (:class:`str`): - Required. The unique name of the table from which to - read a change stream. Values are of the form - ``projects//instances//tables/
``. - Change streaming must be enabled on the table. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (:class:`str`): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. Single cluster routing must be - configured on the profile. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - AsyncIterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: - NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for Bigtable.ReadChangeStream. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable.ReadChangeStreamRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.read_change_stream, - default_timeout=43200.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self) -> "BigtableAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "BigtableAsyncClient", -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/client.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/client.py deleted file mode 100644 index 49db1bb3d..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/client.py +++ /dev/null @@ -1,1625 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Iterable, Sequence, Tuple, Type, Union, cast -import warnings - -from google.cloud.bigtable_v2 import gapic_version as package_version - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - -from google.cloud.bigtable_v2.types import bigtable -from google.cloud.bigtable_v2.types import data -from google.cloud.bigtable_v2.types import request_stats -from .transports.base import BigtableTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import BigtableGrpcTransport -from .transports.grpc_asyncio import BigtableGrpcAsyncIOTransport -from .transports.rest import BigtableRestTransport - - -class BigtableClientMeta(type): - """Metaclass for the Bigtable client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. 
- """ - _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] - _transport_registry["grpc"] = BigtableGrpcTransport - _transport_registry["grpc_asyncio"] = BigtableGrpcAsyncIOTransport - _transport_registry["rest"] = BigtableRestTransport - - def get_transport_class(cls, - label: Optional[str] = None, - ) -> Type[BigtableTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class BigtableClient(metaclass=BigtableClientMeta): - """Service for reading from and writing to existing Bigtable - tables. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" - ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. 
- DEFAULT_ENDPOINT = "bigtable.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - _DEFAULT_ENDPOINT_TEMPLATE = "bigtable.{UNIVERSE_DOMAIN}" - _DEFAULT_UNIVERSE = "googleapis.com" - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> BigtableTransport: - """Returns the transport used by the client instance. - - Returns: - BigtableTransport: The transport used by the client - instance. 
- """ - return self._transport - - @staticmethod - def instance_path(project: str,instance: str,) -> str: - """Returns a fully-qualified instance string.""" - return "projects/{project}/instances/{instance}".format(project=project, instance=instance, ) - - @staticmethod - def parse_instance_path(path: str) -> Dict[str,str]: - """Parses a instance path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def table_path(project: str,instance: str,table: str,) -> str: - """Returns a fully-qualified table string.""" - return "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) - - @staticmethod - def parse_table_path(path: str) -> Dict[str,str]: - """Parses a table path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def 
parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): - """Deprecated. Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - - warnings.warn("get_mtls_endpoint_and_cert_source is deprecated. 
Use the api_endpoint property instead.", - DeprecationWarning) - if client_options is None: - client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - - # Figure out the client cert source to use. - client_cert_source = None - if use_client_cert == "true": - if client_options.client_cert_source: - client_cert_source = client_options.client_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - api_endpoint = cls.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = cls.DEFAULT_ENDPOINT - - return api_endpoint, client_cert_source - - @staticmethod - def _read_environment_variables(): - """Returns the environment variables used by the client. - - Returns: - Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, - GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. - - Raises: - ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not - any of ["true", "false"]. - google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT - is not any of ["auto", "never", "always"]. 
- """ - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false").lower() - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() - universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env - - def _get_client_cert_source(provided_cert_source, use_cert_flag): - """Return the client cert source to be used by the client. - - Args: - provided_cert_source (bytes): The client certificate source provided. - use_cert_flag (bool): A flag indicating whether to use the client certificate. - - Returns: - bytes or None: The client cert source to be used by the client. - """ - client_cert_source = None - if use_cert_flag: - if provided_cert_source: - client_cert_source = provided_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - return client_cert_source - - def _get_api_endpoint(api_override, client_cert_source, universe_domain, use_mtls_endpoint): - """Return the API endpoint used by the client. - - Args: - api_override (str): The API endpoint override. If specified, this is always - the return value of this function and the other arguments are not used. - client_cert_source (bytes): The client certificate source used by the client. - universe_domain (str): The universe domain used by the client. - use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. - Possible values are "always", "auto", or "never". - - Returns: - str: The API endpoint to be used by the client. 
- """ - if api_override is not None: - api_endpoint = api_override - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - _default_universe = BigtableClient._DEFAULT_UNIVERSE - if universe_domain != _default_universe: - raise MutualTLSChannelError(f"mTLS is not supported in any universe other than {_default_universe}.") - api_endpoint = BigtableClient.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=universe_domain) - return api_endpoint - - @staticmethod - def _get_universe_domain(client_universe_domain: Optional[str], universe_domain_env: Optional[str]) -> str: - """Return the universe domain used by the client. - - Args: - client_universe_domain (Optional[str]): The universe domain configured via the client options. - universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. - - Returns: - str: The universe domain to be used by the client. - - Raises: - ValueError: If the universe domain is an empty string. - """ - universe_domain = BigtableClient._DEFAULT_UNIVERSE - if client_universe_domain is not None: - universe_domain = client_universe_domain - elif universe_domain_env is not None: - universe_domain = universe_domain_env - if len(universe_domain.strip()) == 0: - raise ValueError("Universe Domain cannot be an empty string.") - return universe_domain - - @staticmethod - def _compare_universes(client_universe: str, - credentials: ga_credentials.Credentials) -> bool: - """Returns True iff the universe domains used by the client and credentials match. - - Args: - client_universe (str): The universe domain configured via the client options. - credentials (ga_credentials.Credentials): The credentials being used in the client. - - Returns: - bool: True iff client_universe matches the universe in credentials. 
- - Raises: - ValueError: when client_universe does not match the universe in credentials. - """ - if credentials: - credentials_universe = credentials.universe_domain - if client_universe != credentials_universe: - default_universe = BigtableClient._DEFAULT_UNIVERSE - raise ValueError("The configured universe domain " - f"({client_universe}) does not match the universe domain " - f"found in the credentials ({credentials_universe}). " - "If you haven't configured the universe domain explicitly, " - f"`{default_universe}` is the default.") - return True - - def _validate_universe_domain(self): - """Validates client's and credentials' universe domains are consistent. - - Returns: - bool: True iff the configured universe domain is valid. - - Raises: - ValueError: If the configured universe domain is not valid. - """ - self._is_universe_domain_valid = (self._is_universe_domain_valid or - BigtableClient._compare_universes(self.universe_domain, self.transport._credentials)) - return self._is_universe_domain_valid - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. - - Returns: - str: The universe domain used by the client instance. - """ - return self._universe_domain - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, BigtableTransport]] = None, - client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the bigtable client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, BigtableTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that the ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. 
- """ - self._client_options = client_options - if isinstance(self._client_options, dict): - self._client_options = client_options_lib.from_dict(self._client_options) - if self._client_options is None: - self._client_options = client_options_lib.ClientOptions() - self._client_options = cast(client_options_lib.ClientOptions, self._client_options) - - universe_domain_opt = getattr(self._client_options, 'universe_domain', None) - - self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = BigtableClient._read_environment_variables() - self._client_cert_source = BigtableClient._get_client_cert_source(self._client_options.client_cert_source, self._use_client_cert) - self._universe_domain = BigtableClient._get_universe_domain(universe_domain_opt, self._universe_domain_env) - self._api_endpoint = None # updated below, depending on `transport` - - # Initialize the universe domain validation. - self._is_universe_domain_valid = False - - api_key_value = getattr(self._client_options, "api_key", None) - if api_key_value and credentials: - raise ValueError("client_options.api_key and credentials are mutually exclusive") - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - transport_provided = isinstance(transport, BigtableTransport) - if transport_provided: - # transport is a BigtableTransport instance. - if credentials or self._client_options.credentials_file or api_key_value: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if self._client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = cast(BigtableTransport, transport) - self._api_endpoint = self._transport.host - - self._api_endpoint = (self._api_endpoint or - BigtableClient._get_api_endpoint( - self._client_options.api_endpoint, - self._client_cert_source, - self._universe_domain, - self._use_mtls_endpoint)) - - if not transport_provided: - import google.auth._default # type: ignore - - if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): - credentials = google.auth._default.get_api_key_credentials(api_key_value) - - Transport = type(self).get_transport_class(cast(str, transport)) - self._transport = Transport( - credentials=credentials, - credentials_file=self._client_options.credentials_file, - host=self._api_endpoint, - scopes=self._client_options.scopes, - client_cert_source_for_mtls=self._client_cert_source, - quota_project_id=self._client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - api_audience=self._client_options.api_audience, - ) - - def read_rows(self, - request: Optional[Union[bigtable.ReadRowsRequest, dict]] = None, - *, - table_name: Optional[str] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Iterable[bigtable.ReadRowsResponse]: - r"""Streams back the contents of all requested rows in - key order, optionally applying the same Reader filter to - each. Depending on their size, rows and cells may be - broken up across multiple responses, but atomicity of - each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Args: - request (Union[google.cloud.bigtable_v2.types.ReadRowsRequest, dict]): - The request object. Request message for - Bigtable.ReadRows. - table_name (str): - Required. The unique name of the table from which to - read. Values are of the form - ``projects//instances//tables/
``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.bigtable_v2.types.ReadRowsResponse]: - Response message for - Bigtable.ReadRows. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.ReadRowsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable.ReadRowsRequest): - request = bigtable.ReadRowsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.read_rows]
-
- header_params = {}
-
- routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
- regex_match = routing_param_regex.match(request.table_name)
- if regex_match and regex_match.group("table_name"):
- header_params["table_name"] = regex_match.group("table_name")
-
- if request.app_profile_id:
- header_params["app_profile_id"] = request.app_profile_id
-
- if header_params:
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata(header_params),
- )
-
- # Validate the universe domain.
- self._validate_universe_domain()
-
- # Send the request.
- response = rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # Done; return the response.
- return response
-
- def sample_row_keys(self,
- request: Optional[Union[bigtable.SampleRowKeysRequest, dict]] = None,
- *,
- table_name: Optional[str] = None,
- app_profile_id: Optional[str] = None,
- retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: Union[float, object] = gapic_v1.method.DEFAULT,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> Iterable[bigtable.SampleRowKeysResponse]:
- r"""Returns a sample of row keys in the table. The
- returned row keys will delimit contiguous sections of
- the table of approximately equal size, which can be used
- to break up the data for distributed tasks like
- mapreduces.
-
- Args:
- request (Union[google.cloud.bigtable_v2.types.SampleRowKeysRequest, dict]):
- The request object. Request message for
- Bigtable.SampleRowKeys.
- table_name (str):
- Required. The unique name of the table from which to
- sample row keys. Values are of the form
- ``projects/<project>/instances/<instance>/tables/<table>
``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.bigtable_v2.types.SampleRowKeysResponse]: - Response message for - Bigtable.SampleRowKeys. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.SampleRowKeysRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable.SampleRowKeysRequest): - request = bigtable.SampleRowKeysRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table_name is not None: - request.table_name = table_name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.sample_row_keys]
-
- header_params = {}
-
- routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
- regex_match = routing_param_regex.match(request.table_name)
- if regex_match and regex_match.group("table_name"):
- header_params["table_name"] = regex_match.group("table_name")
-
- if request.app_profile_id:
- header_params["app_profile_id"] = request.app_profile_id
-
- if header_params:
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata(header_params),
- )
-
- # Validate the universe domain.
- self._validate_universe_domain()
-
- # Send the request.
- response = rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # Done; return the response.
- return response
-
- def mutate_row(self,
- request: Optional[Union[bigtable.MutateRowRequest, dict]] = None,
- *,
- table_name: Optional[str] = None,
- row_key: Optional[bytes] = None,
- mutations: Optional[MutableSequence[data.Mutation]] = None,
- app_profile_id: Optional[str] = None,
- retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: Union[float, object] = gapic_v1.method.DEFAULT,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> bigtable.MutateRowResponse:
- r"""Mutates a row atomically. Cells already present in the row are
- left unchanged unless explicitly changed by ``mutation``.
-
- Args:
- request (Union[google.cloud.bigtable_v2.types.MutateRowRequest, dict]):
- The request object. Request message for
- Bigtable.MutateRow.
- table_name (str):
- Required. The unique name of the table to which the
- mutation should be applied. Values are of the form
- ``projects/<project>/instances/<instance>/tables/<table>
``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - row_key (bytes): - Required. The key of the row to which - the mutation should be applied. - - This corresponds to the ``row_key`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): - Required. Changes to be atomically - applied to the specified row. Entries - are applied in order, meaning that - earlier mutations can be masked by later - ones. Must contain at least one entry - and at most 100000. - - This corresponds to the ``mutations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_v2.types.MutateRowResponse: - Response message for - Bigtable.MutateRow. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, mutations, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.MutateRowRequest. 
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
- if not isinstance(request, bigtable.MutateRowRequest):
- request = bigtable.MutateRowRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
- if table_name is not None:
- request.table_name = table_name
- if row_key is not None:
- request.row_key = row_key
- if mutations is not None:
- request.mutations = mutations
- if app_profile_id is not None:
- request.app_profile_id = app_profile_id
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.mutate_row]
-
- header_params = {}
-
- routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
- regex_match = routing_param_regex.match(request.table_name)
- if regex_match and regex_match.group("table_name"):
- header_params["table_name"] = regex_match.group("table_name")
-
- if request.app_profile_id:
- header_params["app_profile_id"] = request.app_profile_id
-
- if header_params:
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata(header_params),
- )
-
- # Validate the universe domain.
- self._validate_universe_domain()
-
- # Send the request.
- response = rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # Done; return the response.
- return response
-
- def mutate_rows(self,
- request: Optional[Union[bigtable.MutateRowsRequest, dict]] = None,
- *,
- table_name: Optional[str] = None,
- entries: Optional[MutableSequence[bigtable.MutateRowsRequest.Entry]] = None,
- app_profile_id: Optional[str] = None,
- retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: Union[float, object] = gapic_v1.method.DEFAULT,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> Iterable[bigtable.MutateRowsResponse]:
- r"""Mutates multiple rows in a batch. 
Each individual row - is mutated atomically as in MutateRow, but the entire - batch is not executed atomically. - - Args: - request (Union[google.cloud.bigtable_v2.types.MutateRowsRequest, dict]): - The request object. Request message for - BigtableService.MutateRows. - table_name (str): - Required. The unique name of the - table to which the mutations should be - applied. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): - Required. The row keys and - corresponding mutations to be applied in - bulk. Each entry is applied as an atomic - mutation, but the entries may be applied - in arbitrary order (even between entries - for the same row). At least one entry - must be specified, and in total the - entries can contain at most 100000 - mutations. - - This corresponds to the ``entries`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.bigtable_v2.types.MutateRowsResponse]: - Response message for - BigtableService.MutateRows. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([table_name, entries, app_profile_id])
- if request is not None and has_flattened_params:
- raise ValueError('If the `request` argument is set, then none of '
- 'the individual field arguments should be set.')
-
- # Minor optimization to avoid making a copy if the user passes
- # in a bigtable.MutateRowsRequest.
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
- if not isinstance(request, bigtable.MutateRowsRequest):
- request = bigtable.MutateRowsRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
- if table_name is not None:
- request.table_name = table_name
- if entries is not None:
- request.entries = entries
- if app_profile_id is not None:
- request.app_profile_id = app_profile_id
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.mutate_rows]
-
- header_params = {}
-
- routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
- regex_match = routing_param_regex.match(request.table_name)
- if regex_match and regex_match.group("table_name"):
- header_params["table_name"] = regex_match.group("table_name")
-
- if request.app_profile_id:
- header_params["app_profile_id"] = request.app_profile_id
-
- if header_params:
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata(header_params),
- )
-
- # Validate the universe domain.
- self._validate_universe_domain()
-
- # Send the request.
- response = rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # Done; return the response. 
- return response - - def check_and_mutate_row(self, - request: Optional[Union[bigtable.CheckAndMutateRowRequest, dict]] = None, - *, - table_name: Optional[str] = None, - row_key: Optional[bytes] = None, - predicate_filter: Optional[data.RowFilter] = None, - true_mutations: Optional[MutableSequence[data.Mutation]] = None, - false_mutations: Optional[MutableSequence[data.Mutation]] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable.CheckAndMutateRowResponse: - r"""Mutates a row atomically based on the output of a - predicate Reader filter. - - Args: - request (Union[google.cloud.bigtable_v2.types.CheckAndMutateRowRequest, dict]): - The request object. Request message for - Bigtable.CheckAndMutateRow. - table_name (str): - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of - the form - ``projects//instances//tables/
``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - row_key (bytes): - Required. The key of the row to which - the conditional mutation should be - applied. - - This corresponds to the ``row_key`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - predicate_filter (google.cloud.bigtable_v2.types.RowFilter): - The filter to be applied to the contents of the - specified row. Depending on whether or not any results - are yielded, either ``true_mutations`` or - ``false_mutations`` will be executed. If unset, checks - that the row contains any values at all. - - This corresponds to the ``predicate_filter`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - true_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): - Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when - applied to ``row_key``. Entries are applied in order, - meaning that earlier mutations can be masked by later - ones. Must contain at least one entry if - ``false_mutations`` is empty, and at most 100000. - - This corresponds to the ``true_mutations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - false_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): - Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when - applied to ``row_key``. Entries are applied in order, - meaning that earlier mutations can be masked by later - ones. Must contain at least one entry if - ``true_mutations`` is empty, and at most 100000. - - This corresponds to the ``false_mutations`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - This value specifies routing for - replication. 
If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_v2.types.CheckAndMutateRowResponse: - Response message for - Bigtable.CheckAndMutateRow. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, predicate_filter, true_mutations, false_mutations, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.CheckAndMutateRowRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable.CheckAndMutateRowRequest): - request = bigtable.CheckAndMutateRowRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if table_name is not None:
- request.table_name = table_name
- if row_key is not None:
- request.row_key = row_key
- if predicate_filter is not None:
- request.predicate_filter = predicate_filter
- if true_mutations is not None:
- request.true_mutations = true_mutations
- if false_mutations is not None:
- request.false_mutations = false_mutations
- if app_profile_id is not None:
- request.app_profile_id = app_profile_id
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.check_and_mutate_row]
-
- header_params = {}
-
- routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
- regex_match = routing_param_regex.match(request.table_name)
- if regex_match and regex_match.group("table_name"):
- header_params["table_name"] = regex_match.group("table_name")
-
- if request.app_profile_id:
- header_params["app_profile_id"] = request.app_profile_id
-
- if header_params:
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata(header_params),
- )
-
- # Validate the universe domain.
- self._validate_universe_domain()
-
- # Send the request.
- response = rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # Done; return the response.
- return response
-
- def ping_and_warm(self,
- request: Optional[Union[bigtable.PingAndWarmRequest, dict]] = None,
- *,
- name: Optional[str] = None,
- app_profile_id: Optional[str] = None,
- retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: Union[float, object] = gapic_v1.method.DEFAULT,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> bigtable.PingAndWarmResponse:
- r"""Warm up associated instance metadata for this
- connection. This call is not required but may be useful
- for connection keep-alive.
-
- Args:
- request (Union[google.cloud.bigtable_v2.types.PingAndWarmRequest, dict]):
- The request object. 
Request message for client connection - keep-alive and warming. - name (str): - Required. The unique name of the instance to check - permissions for as well as respond. Values are of the - form ``projects//instances/``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_v2.types.PingAndWarmResponse: - Response message for - Bigtable.PingAndWarm connection - keepalive and warming. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.PingAndWarmRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable.PingAndWarmRequest): - request = bigtable.PingAndWarmRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None:
- request.name = name
- if app_profile_id is not None:
- request.app_profile_id = app_profile_id
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.ping_and_warm]
-
- header_params = {}
-
- routing_param_regex = re.compile('^(?P<name>projects/[^/]+/instances/[^/]+)$')
- regex_match = routing_param_regex.match(request.name)
- if regex_match and regex_match.group("name"):
- header_params["name"] = regex_match.group("name")
-
- if request.app_profile_id:
- header_params["app_profile_id"] = request.app_profile_id
-
- if header_params:
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata(header_params),
- )
-
- # Validate the universe domain.
- self._validate_universe_domain()
-
- # Send the request.
- response = rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # Done; return the response.
- return response
-
- def read_modify_write_row(self,
- request: Optional[Union[bigtable.ReadModifyWriteRowRequest, dict]] = None,
- *,
- table_name: Optional[str] = None,
- row_key: Optional[bytes] = None,
- rules: Optional[MutableSequence[data.ReadModifyWriteRule]] = None,
- app_profile_id: Optional[str] = None,
- retry: OptionalRetry = gapic_v1.method.DEFAULT,
- timeout: Union[float, object] = gapic_v1.method.DEFAULT,
- metadata: Sequence[Tuple[str, str]] = (),
- ) -> bigtable.ReadModifyWriteRowResponse:
- r"""Modifies a row atomically on the server. The method
- reads the latest existing timestamp and value from the
- specified columns and writes a new entry based on
- pre-defined read/modify/write rules. The new value for
- the timestamp is the greater of the existing timestamp
- or the current server time. The method returns the new
- contents of all modified cells.
-
- Args:
- request (Union[google.cloud.bigtable_v2.types.ReadModifyWriteRowRequest, dict]):
- The request object. 
Request message for - Bigtable.ReadModifyWriteRow. - table_name (str): - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of - the form - ``projects//instances//tables/
``. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - row_key (bytes): - Required. The key of the row to which - the read/modify/write rules should be - applied. - - This corresponds to the ``row_key`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - rules (MutableSequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): - Required. Rules specifying how the - specified row's contents are to be - transformed into writes. Entries are - applied in order, meaning that earlier - rules will affect the results of later - ones. - - This corresponds to the ``rules`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_v2.types.ReadModifyWriteRowResponse: - Response message for - Bigtable.ReadModifyWriteRow. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, row_key, rules, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.ReadModifyWriteRowRequest. 
- # There's no risk of modifying the input as we've already verified
- # there are no flattened fields.
- if not isinstance(request, bigtable.ReadModifyWriteRowRequest):
- request = bigtable.ReadModifyWriteRowRequest(request)
- # If we have keyword arguments corresponding to fields on the
- # request, apply these.
- if table_name is not None:
- request.table_name = table_name
- if row_key is not None:
- request.row_key = row_key
- if rules is not None:
- request.rules = rules
- if app_profile_id is not None:
- request.app_profile_id = app_profile_id
-
- # Wrap the RPC method; this adds retry and timeout information,
- # and friendly error handling.
- rpc = self._transport._wrapped_methods[self._transport.read_modify_write_row]
-
- header_params = {}
-
- routing_param_regex = re.compile('^(?P<table_name>projects/[^/]+/instances/[^/]+/tables/[^/]+)$')
- regex_match = routing_param_regex.match(request.table_name)
- if regex_match and regex_match.group("table_name"):
- header_params["table_name"] = regex_match.group("table_name")
-
- if request.app_profile_id:
- header_params["app_profile_id"] = request.app_profile_id
-
- if header_params:
- metadata = tuple(metadata) + (
- gapic_v1.routing_header.to_grpc_metadata(header_params),
- )
-
- # Validate the universe domain.
- self._validate_universe_domain()
-
- # Send the request.
- response = rpc(
- request,
- retry=retry,
- timeout=timeout,
- metadata=metadata,
- )
-
- # Done; return the response. 
- return response - - def generate_initial_change_stream_partitions(self, - request: Optional[Union[bigtable.GenerateInitialChangeStreamPartitionsRequest, dict]] = None, - *, - table_name: Optional[str] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Iterable[bigtable.GenerateInitialChangeStreamPartitionsResponse]: - r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. - Returns the current list of partitions that make up the table's - change stream. The union of partitions will cover the entire - keyspace. Partitions can be read with ``ReadChangeStream``. - - Args: - request (Union[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsRequest, dict]): - The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for - Bigtable.GenerateInitialChangeStreamPartitions. - table_name (str): - Required. The unique name of the table from which to get - change stream partitions. Values are of the form - ``projects//instances//tables/
``. - Change streaming must be enabled on the table. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. Single cluster routing must be - configured on the profile. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.bigtable_v2.types.GenerateInitialChangeStreamPartitionsResponse]: - NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for - Bigtable.GenerateInitialChangeStreamPartitions. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.GenerateInitialChangeStreamPartitionsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable.GenerateInitialChangeStreamPartitionsRequest): - request = bigtable.GenerateInitialChangeStreamPartitionsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if table_name is not None: - request.table_name = table_name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.generate_initial_change_stream_partitions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def read_change_stream(self, - request: Optional[Union[bigtable.ReadChangeStreamRequest, dict]] = None, - *, - table_name: Optional[str] = None, - app_profile_id: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> Iterable[bigtable.ReadChangeStreamResponse]: - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. - - Args: - request (Union[google.cloud.bigtable_v2.types.ReadChangeStreamRequest, dict]): - The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for Bigtable.ReadChangeStream. - table_name (str): - Required. The unique name of the table from which to - read a change stream. Values are of the form - ``projects//instances//tables/
``. - Change streaming must be enabled on the table. - - This corresponds to the ``table_name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - This value specifies routing for - replication. If not specified, the - "default" application profile will be - used. Single cluster routing must be - configured on the profile. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - Iterable[google.cloud.bigtable_v2.types.ReadChangeStreamResponse]: - NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for Bigtable.ReadChangeStream. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table_name, app_profile_id]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable.ReadChangeStreamRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable.ReadChangeStreamRequest): - request = bigtable.ReadChangeStreamRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if table_name is not None: - request.table_name = table_name - if app_profile_id is not None: - request.app_profile_id = app_profile_id - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.read_change_stream] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table_name", request.table_name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self) -> "BigtableClient": - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - - - - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "BigtableClient", -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py deleted file mode 100644 index 49f168289..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import BigtableTransport -from .grpc import BigtableGrpcTransport -from .grpc_asyncio import BigtableGrpcAsyncIOTransport -from .rest import BigtableRestTransport -from .rest import BigtableRestInterceptor - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTransport]] -_transport_registry['grpc'] = BigtableGrpcTransport -_transport_registry['grpc_asyncio'] = BigtableGrpcAsyncIOTransport -_transport_registry['rest'] = BigtableRestTransport - -__all__ = ( - 'BigtableTransport', - 'BigtableGrpcTransport', - 'BigtableGrpcAsyncIOTransport', - 'BigtableRestTransport', - 'BigtableRestInterceptor', -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/base.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/base.py deleted file mode 100644 index 163738225..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/base.py +++ /dev/null @@ -1,276 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union - -from google.cloud.bigtable_v2 import gapic_version as package_version - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.bigtable_v2.types import bigtable - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -class BigtableTransport(abc.ABC): - """Abstract transport class for Bigtable.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/bigtable.data', - 'https://www.googleapis.com/auth/bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-bigtable.data', - 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', - ) - - DEFAULT_HOST: str = 'bigtable.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - **kwargs, - ) -> None: - 
"""Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtable.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. - if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - # Don't apply audience if the credentials file passed from user. 
- if hasattr(credentials, "with_gdch_audience"): - credentials = credentials.with_gdch_audience(api_audience if api_audience else host) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - @property - def host(self): - return self._host - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. - self._wrapped_methods = { - self.read_rows: gapic_v1.method.wrap_method( - self.read_rows, - default_timeout=43200.0, - client_info=client_info, - ), - self.sample_row_keys: gapic_v1.method.wrap_method( - self.sample_row_keys, - default_timeout=60.0, - client_info=client_info, - ), - self.mutate_row: gapic_v1.method.wrap_method( - self.mutate_row, - default_retry=retries.Retry( -initial=0.01,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.mutate_rows: gapic_v1.method.wrap_method( - self.mutate_rows, - default_timeout=600.0, - client_info=client_info, - ), - self.check_and_mutate_row: gapic_v1.method.wrap_method( - self.check_and_mutate_row, - default_timeout=20.0, - client_info=client_info, - ), - self.ping_and_warm: gapic_v1.method.wrap_method( - self.ping_and_warm, - default_timeout=None, - client_info=client_info, - ), - self.read_modify_write_row: gapic_v1.method.wrap_method( - self.read_modify_write_row, - default_timeout=20.0, - client_info=client_info, - ), - self.generate_initial_change_stream_partitions: 
gapic_v1.method.wrap_method( - self.generate_initial_change_stream_partitions, - default_timeout=60.0, - client_info=client_info, - ), - self.read_change_stream: gapic_v1.method.wrap_method( - self.read_change_stream, - default_timeout=43200.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! - """ - raise NotImplementedError() - - @property - def read_rows(self) -> Callable[ - [bigtable.ReadRowsRequest], - Union[ - bigtable.ReadRowsResponse, - Awaitable[bigtable.ReadRowsResponse] - ]]: - raise NotImplementedError() - - @property - def sample_row_keys(self) -> Callable[ - [bigtable.SampleRowKeysRequest], - Union[ - bigtable.SampleRowKeysResponse, - Awaitable[bigtable.SampleRowKeysResponse] - ]]: - raise NotImplementedError() - - @property - def mutate_row(self) -> Callable[ - [bigtable.MutateRowRequest], - Union[ - bigtable.MutateRowResponse, - Awaitable[bigtable.MutateRowResponse] - ]]: - raise NotImplementedError() - - @property - def mutate_rows(self) -> Callable[ - [bigtable.MutateRowsRequest], - Union[ - bigtable.MutateRowsResponse, - Awaitable[bigtable.MutateRowsResponse] - ]]: - raise NotImplementedError() - - @property - def check_and_mutate_row(self) -> Callable[ - [bigtable.CheckAndMutateRowRequest], - Union[ - bigtable.CheckAndMutateRowResponse, - Awaitable[bigtable.CheckAndMutateRowResponse] - ]]: - raise NotImplementedError() - - @property - def ping_and_warm(self) -> Callable[ - [bigtable.PingAndWarmRequest], - Union[ - bigtable.PingAndWarmResponse, - Awaitable[bigtable.PingAndWarmResponse] - ]]: - raise NotImplementedError() - - @property - def read_modify_write_row(self) -> Callable[ - [bigtable.ReadModifyWriteRowRequest], - Union[ - bigtable.ReadModifyWriteRowResponse, - Awaitable[bigtable.ReadModifyWriteRowResponse] - ]]: - raise NotImplementedError() - - 
@property - def generate_initial_change_stream_partitions(self) -> Callable[ - [bigtable.GenerateInitialChangeStreamPartitionsRequest], - Union[ - bigtable.GenerateInitialChangeStreamPartitionsResponse, - Awaitable[bigtable.GenerateInitialChangeStreamPartitionsResponse] - ]]: - raise NotImplementedError() - - @property - def read_change_stream(self) -> Callable[ - [bigtable.ReadChangeStreamRequest], - Union[ - bigtable.ReadChangeStreamResponse, - Awaitable[bigtable.ReadChangeStreamResponse] - ]]: - raise NotImplementedError() - - @property - def kind(self) -> str: - raise NotImplementedError() - - -__all__ = ( - 'BigtableTransport', -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py deleted file mode 100644 index 058db3be7..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc.py +++ /dev/null @@ -1,501 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.bigtable_v2.types import bigtable -from .base import BigtableTransport, DEFAULT_CLIENT_INFO - - -class BigtableGrpcTransport(BigtableTransport): - """gRPC backend transport for Bigtable. - - Service for reading from and writing to existing Bigtable - tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'bigtable.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtable.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. 
- if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'bigtable.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. 
If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def read_rows(self) -> Callable[ - [bigtable.ReadRowsRequest], - bigtable.ReadRowsResponse]: - r"""Return a callable for the read rows method over gRPC. - - Streams back the contents of all requested rows in - key order, optionally applying the same Reader filter to - each. Depending on their size, rows and cells may be - broken up across multiple responses, but atomicity of - each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Returns: - Callable[[~.ReadRowsRequest], - ~.ReadRowsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_rows' not in self._stubs: - self._stubs['read_rows'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/ReadRows', - request_serializer=bigtable.ReadRowsRequest.serialize, - response_deserializer=bigtable.ReadRowsResponse.deserialize, - ) - return self._stubs['read_rows'] - - @property - def sample_row_keys(self) -> Callable[ - [bigtable.SampleRowKeysRequest], - bigtable.SampleRowKeysResponse]: - r"""Return a callable for the sample row keys method over gRPC. - - Returns a sample of row keys in the table. The - returned row keys will delimit contiguous sections of - the table of approximately equal size, which can be used - to break up the data for distributed tasks like - mapreduces. - - Returns: - Callable[[~.SampleRowKeysRequest], - ~.SampleRowKeysResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'sample_row_keys' not in self._stubs: - self._stubs['sample_row_keys'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/SampleRowKeys', - request_serializer=bigtable.SampleRowKeysRequest.serialize, - response_deserializer=bigtable.SampleRowKeysResponse.deserialize, - ) - return self._stubs['sample_row_keys'] - - @property - def mutate_row(self) -> Callable[ - [bigtable.MutateRowRequest], - bigtable.MutateRowResponse]: - r"""Return a callable for the mutate row method over gRPC. - - Mutates a row atomically. Cells already present in the row are - left unchanged unless explicitly changed by ``mutation``. - - Returns: - Callable[[~.MutateRowRequest], - ~.MutateRowResponse]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'mutate_row' not in self._stubs: - self._stubs['mutate_row'] = self.grpc_channel.unary_unary( - '/google.bigtable.v2.Bigtable/MutateRow', - request_serializer=bigtable.MutateRowRequest.serialize, - response_deserializer=bigtable.MutateRowResponse.deserialize, - ) - return self._stubs['mutate_row'] - - @property - def mutate_rows(self) -> Callable[ - [bigtable.MutateRowsRequest], - bigtable.MutateRowsResponse]: - r"""Return a callable for the mutate rows method over gRPC. - - Mutates multiple rows in a batch. Each individual row - is mutated atomically as in MutateRow, but the entire - batch is not executed atomically. - - Returns: - Callable[[~.MutateRowsRequest], - ~.MutateRowsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'mutate_rows' not in self._stubs: - self._stubs['mutate_rows'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/MutateRows', - request_serializer=bigtable.MutateRowsRequest.serialize, - response_deserializer=bigtable.MutateRowsResponse.deserialize, - ) - return self._stubs['mutate_rows'] - - @property - def check_and_mutate_row(self) -> Callable[ - [bigtable.CheckAndMutateRowRequest], - bigtable.CheckAndMutateRowResponse]: - r"""Return a callable for the check and mutate row method over gRPC. - - Mutates a row atomically based on the output of a - predicate Reader filter. - - Returns: - Callable[[~.CheckAndMutateRowRequest], - ~.CheckAndMutateRowResponse]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'check_and_mutate_row' not in self._stubs: - self._stubs['check_and_mutate_row'] = self.grpc_channel.unary_unary( - '/google.bigtable.v2.Bigtable/CheckAndMutateRow', - request_serializer=bigtable.CheckAndMutateRowRequest.serialize, - response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, - ) - return self._stubs['check_and_mutate_row'] - - @property - def ping_and_warm(self) -> Callable[ - [bigtable.PingAndWarmRequest], - bigtable.PingAndWarmResponse]: - r"""Return a callable for the ping and warm method over gRPC. - - Warm up associated instance metadata for this - connection. This call is not required but may be useful - for connection keep-alive. - - Returns: - Callable[[~.PingAndWarmRequest], - ~.PingAndWarmResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'ping_and_warm' not in self._stubs: - self._stubs['ping_and_warm'] = self.grpc_channel.unary_unary( - '/google.bigtable.v2.Bigtable/PingAndWarm', - request_serializer=bigtable.PingAndWarmRequest.serialize, - response_deserializer=bigtable.PingAndWarmResponse.deserialize, - ) - return self._stubs['ping_and_warm'] - - @property - def read_modify_write_row(self) -> Callable[ - [bigtable.ReadModifyWriteRowRequest], - bigtable.ReadModifyWriteRowResponse]: - r"""Return a callable for the read modify write row method over gRPC. - - Modifies a row atomically on the server. The method - reads the latest existing timestamp and value from the - specified columns and writes a new entry based on - pre-defined read/modify/write rules. 
The new value for - the timestamp is the greater of the existing timestamp - or the current server time. The method returns the new - contents of all modified cells. - - Returns: - Callable[[~.ReadModifyWriteRowRequest], - ~.ReadModifyWriteRowResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_modify_write_row' not in self._stubs: - self._stubs['read_modify_write_row'] = self.grpc_channel.unary_unary( - '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', - request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, - response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, - ) - return self._stubs['read_modify_write_row'] - - @property - def generate_initial_change_stream_partitions(self) -> Callable[ - [bigtable.GenerateInitialChangeStreamPartitionsRequest], - bigtable.GenerateInitialChangeStreamPartitionsResponse]: - r"""Return a callable for the generate initial change stream - partitions method over gRPC. - - NOTE: This API is intended to be used by Apache Beam BigtableIO. - Returns the current list of partitions that make up the table's - change stream. The union of partitions will cover the entire - keyspace. Partitions can be read with ``ReadChangeStream``. - - Returns: - Callable[[~.GenerateInitialChangeStreamPartitionsRequest], - ~.GenerateInitialChangeStreamPartitionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'generate_initial_change_stream_partitions' not in self._stubs: - self._stubs['generate_initial_change_stream_partitions'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', - request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, - response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, - ) - return self._stubs['generate_initial_change_stream_partitions'] - - @property - def read_change_stream(self) -> Callable[ - [bigtable.ReadChangeStreamRequest], - bigtable.ReadChangeStreamResponse]: - r"""Return a callable for the read change stream method over gRPC. - - NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. - - Returns: - Callable[[~.ReadChangeStreamRequest], - ~.ReadChangeStreamResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_change_stream' not in self._stubs: - self._stubs['read_change_stream'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/ReadChangeStream', - request_serializer=bigtable.ReadChangeStreamRequest.serialize, - response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, - ) - return self._stubs['read_change_stream'] - - def close(self): - self.grpc_channel.close() - - @property - def kind(self) -> str: - return "grpc" - - -__all__ = ( - 'BigtableGrpcTransport', -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py deleted file mode 100644 index 26aabedec..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/grpc_asyncio.py +++ /dev/null @@ -1,500 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.bigtable_v2.types import bigtable -from .base import BigtableTransport, DEFAULT_CLIENT_INFO -from .grpc import BigtableGrpcTransport - - -class BigtableGrpcAsyncIOTransport(BigtableTransport): - """gRPC AsyncIO backend transport for Bigtable. - - Service for reading from and writing to existing Bigtable - tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'bigtable.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. 
- scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. - """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'bigtable.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtable.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. 
- credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. 
- google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. 
- credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def read_rows(self) -> Callable[ - [bigtable.ReadRowsRequest], - Awaitable[bigtable.ReadRowsResponse]]: - r"""Return a callable for the read rows method over gRPC. - - Streams back the contents of all requested rows in - key order, optionally applying the same Reader filter to - each. Depending on their size, rows and cells may be - broken up across multiple responses, but atomicity of - each row will still be preserved. See the - ReadRowsResponse documentation for details. - - Returns: - Callable[[~.ReadRowsRequest], - Awaitable[~.ReadRowsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_rows' not in self._stubs: - self._stubs['read_rows'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/ReadRows', - request_serializer=bigtable.ReadRowsRequest.serialize, - response_deserializer=bigtable.ReadRowsResponse.deserialize, - ) - return self._stubs['read_rows'] - - @property - def sample_row_keys(self) -> Callable[ - [bigtable.SampleRowKeysRequest], - Awaitable[bigtable.SampleRowKeysResponse]]: - r"""Return a callable for the sample row keys method over gRPC. 
- - Returns a sample of row keys in the table. The - returned row keys will delimit contiguous sections of - the table of approximately equal size, which can be used - to break up the data for distributed tasks like - mapreduces. - - Returns: - Callable[[~.SampleRowKeysRequest], - Awaitable[~.SampleRowKeysResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'sample_row_keys' not in self._stubs: - self._stubs['sample_row_keys'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/SampleRowKeys', - request_serializer=bigtable.SampleRowKeysRequest.serialize, - response_deserializer=bigtable.SampleRowKeysResponse.deserialize, - ) - return self._stubs['sample_row_keys'] - - @property - def mutate_row(self) -> Callable[ - [bigtable.MutateRowRequest], - Awaitable[bigtable.MutateRowResponse]]: - r"""Return a callable for the mutate row method over gRPC. - - Mutates a row atomically. Cells already present in the row are - left unchanged unless explicitly changed by ``mutation``. - - Returns: - Callable[[~.MutateRowRequest], - Awaitable[~.MutateRowResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'mutate_row' not in self._stubs: - self._stubs['mutate_row'] = self.grpc_channel.unary_unary( - '/google.bigtable.v2.Bigtable/MutateRow', - request_serializer=bigtable.MutateRowRequest.serialize, - response_deserializer=bigtable.MutateRowResponse.deserialize, - ) - return self._stubs['mutate_row'] - - @property - def mutate_rows(self) -> Callable[ - [bigtable.MutateRowsRequest], - Awaitable[bigtable.MutateRowsResponse]]: - r"""Return a callable for the mutate rows method over gRPC. - - Mutates multiple rows in a batch. Each individual row - is mutated atomically as in MutateRow, but the entire - batch is not executed atomically. - - Returns: - Callable[[~.MutateRowsRequest], - Awaitable[~.MutateRowsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'mutate_rows' not in self._stubs: - self._stubs['mutate_rows'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/MutateRows', - request_serializer=bigtable.MutateRowsRequest.serialize, - response_deserializer=bigtable.MutateRowsResponse.deserialize, - ) - return self._stubs['mutate_rows'] - - @property - def check_and_mutate_row(self) -> Callable[ - [bigtable.CheckAndMutateRowRequest], - Awaitable[bigtable.CheckAndMutateRowResponse]]: - r"""Return a callable for the check and mutate row method over gRPC. - - Mutates a row atomically based on the output of a - predicate Reader filter. - - Returns: - Callable[[~.CheckAndMutateRowRequest], - Awaitable[~.CheckAndMutateRowResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'check_and_mutate_row' not in self._stubs: - self._stubs['check_and_mutate_row'] = self.grpc_channel.unary_unary( - '/google.bigtable.v2.Bigtable/CheckAndMutateRow', - request_serializer=bigtable.CheckAndMutateRowRequest.serialize, - response_deserializer=bigtable.CheckAndMutateRowResponse.deserialize, - ) - return self._stubs['check_and_mutate_row'] - - @property - def ping_and_warm(self) -> Callable[ - [bigtable.PingAndWarmRequest], - Awaitable[bigtable.PingAndWarmResponse]]: - r"""Return a callable for the ping and warm method over gRPC. - - Warm up associated instance metadata for this - connection. This call is not required but may be useful - for connection keep-alive. - - Returns: - Callable[[~.PingAndWarmRequest], - Awaitable[~.PingAndWarmResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'ping_and_warm' not in self._stubs: - self._stubs['ping_and_warm'] = self.grpc_channel.unary_unary( - '/google.bigtable.v2.Bigtable/PingAndWarm', - request_serializer=bigtable.PingAndWarmRequest.serialize, - response_deserializer=bigtable.PingAndWarmResponse.deserialize, - ) - return self._stubs['ping_and_warm'] - - @property - def read_modify_write_row(self) -> Callable[ - [bigtable.ReadModifyWriteRowRequest], - Awaitable[bigtable.ReadModifyWriteRowResponse]]: - r"""Return a callable for the read modify write row method over gRPC. - - Modifies a row atomically on the server. The method - reads the latest existing timestamp and value from the - specified columns and writes a new entry based on - pre-defined read/modify/write rules. The new value for - the timestamp is the greater of the existing timestamp - or the current server time. The method returns the new - contents of all modified cells. 
- - Returns: - Callable[[~.ReadModifyWriteRowRequest], - Awaitable[~.ReadModifyWriteRowResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'read_modify_write_row' not in self._stubs: - self._stubs['read_modify_write_row'] = self.grpc_channel.unary_unary( - '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', - request_serializer=bigtable.ReadModifyWriteRowRequest.serialize, - response_deserializer=bigtable.ReadModifyWriteRowResponse.deserialize, - ) - return self._stubs['read_modify_write_row'] - - @property - def generate_initial_change_stream_partitions(self) -> Callable[ - [bigtable.GenerateInitialChangeStreamPartitionsRequest], - Awaitable[bigtable.GenerateInitialChangeStreamPartitionsResponse]]: - r"""Return a callable for the generate initial change stream - partitions method over gRPC. - - NOTE: This API is intended to be used by Apache Beam BigtableIO. - Returns the current list of partitions that make up the table's - change stream. The union of partitions will cover the entire - keyspace. Partitions can be read with ``ReadChangeStream``. - - Returns: - Callable[[~.GenerateInitialChangeStreamPartitionsRequest], - Awaitable[~.GenerateInitialChangeStreamPartitionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'generate_initial_change_stream_partitions' not in self._stubs: - self._stubs['generate_initial_change_stream_partitions'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', - request_serializer=bigtable.GenerateInitialChangeStreamPartitionsRequest.serialize, - response_deserializer=bigtable.GenerateInitialChangeStreamPartitionsResponse.deserialize, - ) - return self._stubs['generate_initial_change_stream_partitions'] - - @property - def read_change_stream(self) -> Callable[ - [bigtable.ReadChangeStreamRequest], - Awaitable[bigtable.ReadChangeStreamResponse]]: - r"""Return a callable for the read change stream method over gRPC. - - NOTE: This API is intended to be used by Apache Beam - BigtableIO. Reads changes from a table's change stream. - Changes will reflect both user-initiated mutations and - mutations that are caused by garbage collection. - - Returns: - Callable[[~.ReadChangeStreamRequest], - Awaitable[~.ReadChangeStreamResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'read_change_stream' not in self._stubs: - self._stubs['read_change_stream'] = self.grpc_channel.unary_stream( - '/google.bigtable.v2.Bigtable/ReadChangeStream', - request_serializer=bigtable.ReadChangeStreamRequest.serialize, - response_deserializer=bigtable.ReadChangeStreamResponse.deserialize, - ) - return self._stubs['read_change_stream'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'BigtableGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/rest.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/rest.py deleted file mode 100644 index 44ced4c99..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/services/bigtable/transports/rest.py +++ /dev/null @@ -1,1261 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries -from google.api_core import rest_helpers -from google.api_core import rest_streaming -from google.api_core import path_template -from google.api_core import gapic_v1 - -from google.protobuf import json_format -from requests import __version__ as requests_version -import dataclasses -import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -import warnings - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - - -from google.cloud.bigtable_v2.types import bigtable - -from .base import BigtableTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, - grpc_version=None, - rest_version=requests_version, -) - - -class BigtableRestInterceptor: - """Interceptor for Bigtable. - - Interceptors are used to manipulate requests, request metadata, and responses - in arbitrary ways. - Example use cases include: - * Logging - * Verifying requests according to service or custom semantics - * Stripping extraneous information from responses - - These use cases and more can be enabled by injecting an - instance of a custom subclass when constructing the BigtableRestTransport. - - .. 
code-block:: python - class MyCustomBigtableInterceptor(BigtableRestInterceptor): - def pre_check_and_mutate_row(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_check_and_mutate_row(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_generate_initial_change_stream_partitions(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_generate_initial_change_stream_partitions(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_mutate_row(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_mutate_row(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_mutate_rows(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_mutate_rows(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_ping_and_warm(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_ping_and_warm(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_read_change_stream(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_read_change_stream(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_read_modify_write_row(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_read_modify_write_row(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_read_rows(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_read_rows(self, response): - logging.log(f"Received 
response: {response}") - return response - - def pre_sample_row_keys(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_sample_row_keys(self, response): - logging.log(f"Received response: {response}") - return response - - transport = BigtableRestTransport(interceptor=MyCustomBigtableInterceptor()) - client = BigtableClient(transport=transport) - - - """ - def pre_check_and_mutate_row(self, request: bigtable.CheckAndMutateRowRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.CheckAndMutateRowRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for check_and_mutate_row - - Override in a subclass to manipulate the request or metadata - before they are sent to the Bigtable server. - """ - return request, metadata - - def post_check_and_mutate_row(self, response: bigtable.CheckAndMutateRowResponse) -> bigtable.CheckAndMutateRowResponse: - """Post-rpc interceptor for check_and_mutate_row - - Override in a subclass to manipulate the response - after it is returned by the Bigtable server but before - it is returned to user code. - """ - return response - def pre_generate_initial_change_stream_partitions(self, request: bigtable.GenerateInitialChangeStreamPartitionsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.GenerateInitialChangeStreamPartitionsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for generate_initial_change_stream_partitions - - Override in a subclass to manipulate the request or metadata - before they are sent to the Bigtable server. - """ - return request, metadata - - def post_generate_initial_change_stream_partitions(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: - """Post-rpc interceptor for generate_initial_change_stream_partitions - - Override in a subclass to manipulate the response - after it is returned by the Bigtable server but before - it is returned to user code. 
- """ - return response - def pre_mutate_row(self, request: bigtable.MutateRowRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.MutateRowRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for mutate_row - - Override in a subclass to manipulate the request or metadata - before they are sent to the Bigtable server. - """ - return request, metadata - - def post_mutate_row(self, response: bigtable.MutateRowResponse) -> bigtable.MutateRowResponse: - """Post-rpc interceptor for mutate_row - - Override in a subclass to manipulate the response - after it is returned by the Bigtable server but before - it is returned to user code. - """ - return response - def pre_mutate_rows(self, request: bigtable.MutateRowsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.MutateRowsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for mutate_rows - - Override in a subclass to manipulate the request or metadata - before they are sent to the Bigtable server. - """ - return request, metadata - - def post_mutate_rows(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: - """Post-rpc interceptor for mutate_rows - - Override in a subclass to manipulate the response - after it is returned by the Bigtable server but before - it is returned to user code. - """ - return response - def pre_ping_and_warm(self, request: bigtable.PingAndWarmRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.PingAndWarmRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for ping_and_warm - - Override in a subclass to manipulate the request or metadata - before they are sent to the Bigtable server. 
- """ - return request, metadata - - def post_ping_and_warm(self, response: bigtable.PingAndWarmResponse) -> bigtable.PingAndWarmResponse: - """Post-rpc interceptor for ping_and_warm - - Override in a subclass to manipulate the response - after it is returned by the Bigtable server but before - it is returned to user code. - """ - return response - def pre_read_change_stream(self, request: bigtable.ReadChangeStreamRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.ReadChangeStreamRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for read_change_stream - - Override in a subclass to manipulate the request or metadata - before they are sent to the Bigtable server. - """ - return request, metadata - - def post_read_change_stream(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: - """Post-rpc interceptor for read_change_stream - - Override in a subclass to manipulate the response - after it is returned by the Bigtable server but before - it is returned to user code. - """ - return response - def pre_read_modify_write_row(self, request: bigtable.ReadModifyWriteRowRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.ReadModifyWriteRowRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for read_modify_write_row - - Override in a subclass to manipulate the request or metadata - before they are sent to the Bigtable server. - """ - return request, metadata - - def post_read_modify_write_row(self, response: bigtable.ReadModifyWriteRowResponse) -> bigtable.ReadModifyWriteRowResponse: - """Post-rpc interceptor for read_modify_write_row - - Override in a subclass to manipulate the response - after it is returned by the Bigtable server but before - it is returned to user code. 
- """ - return response - def pre_read_rows(self, request: bigtable.ReadRowsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.ReadRowsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for read_rows - - Override in a subclass to manipulate the request or metadata - before they are sent to the Bigtable server. - """ - return request, metadata - - def post_read_rows(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: - """Post-rpc interceptor for read_rows - - Override in a subclass to manipulate the response - after it is returned by the Bigtable server but before - it is returned to user code. - """ - return response - def pre_sample_row_keys(self, request: bigtable.SampleRowKeysRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable.SampleRowKeysRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for sample_row_keys - - Override in a subclass to manipulate the request or metadata - before they are sent to the Bigtable server. - """ - return request, metadata - - def post_sample_row_keys(self, response: rest_streaming.ResponseIterator) -> rest_streaming.ResponseIterator: - """Post-rpc interceptor for sample_row_keys - - Override in a subclass to manipulate the response - after it is returned by the Bigtable server but before - it is returned to user code. - """ - return response - - -@dataclasses.dataclass -class BigtableRestStub: - _session: AuthorizedSession - _host: str - _interceptor: BigtableRestInterceptor - - -class BigtableRestTransport(BigtableTransport): - """REST backend transport for Bigtable. - - Service for reading from and writing to existing Bigtable - tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends JSON representations of protocol buffers over HTTP/1.1 - - """ - - def __init__(self, *, - host: str = 'bigtable.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[ - ], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = 'https', - interceptor: Optional[BigtableRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtable.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. 
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. - # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST) - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or BigtableRestInterceptor() - self._prep_wrapped_messages(client_info) - - class _CheckAndMutateRow(BigtableRestStub): - def __hash__(self): - return hash("CheckAndMutateRow") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable.CheckAndMutateRowRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable.CheckAndMutateRowResponse: - r"""Call the check and mutate row method over HTTP. - - Args: - request (~.bigtable.CheckAndMutateRowRequest): - The request object. Request message for - Bigtable.CheckAndMutateRow. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable.CheckAndMutateRowResponse: - Response message for - Bigtable.CheckAndMutateRow. - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_check_and_mutate_row(request, metadata) - pb_request = bigtable.CheckAndMutateRowRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable.CheckAndMutateRowResponse() - pb_resp = bigtable.CheckAndMutateRowResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_check_and_mutate_row(resp) - return resp - - class _GenerateInitialChangeStreamPartitions(BigtableRestStub): - def __hash__(self): - return hash("GenerateInitialChangeStreamPartitions") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable.GenerateInitialChangeStreamPartitionsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> rest_streaming.ResponseIterator: - r"""Call the generate initial change - stream partitions method over HTTP. - - Args: - request (~.bigtable.GenerateInitialChangeStreamPartitionsRequest): - The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for - Bigtable.GenerateInitialChangeStreamPartitions. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable.GenerateInitialChangeStreamPartitionsResponse: - NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for - Bigtable.GenerateInitialChangeStreamPartitions. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_generate_initial_change_stream_partitions(request, metadata) - pb_request = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = rest_streaming.ResponseIterator(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) - resp = self._interceptor.post_generate_initial_change_stream_partitions(resp) - return resp - - class _MutateRow(BigtableRestStub): - def __hash__(self): - return hash("MutateRow") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable.MutateRowRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable.MutateRowResponse: - r"""Call the mutate row method over HTTP. - - Args: - request (~.bigtable.MutateRowRequest): - The request object. Request message for - Bigtable.MutateRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable.MutateRowResponse: - Response message for - Bigtable.MutateRow. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_mutate_row(request, metadata) - pb_request = bigtable.MutateRowRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable.MutateRowResponse() - pb_resp = bigtable.MutateRowResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_mutate_row(resp) - return resp - - class _MutateRows(BigtableRestStub): - def __hash__(self): - return hash("MutateRows") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable.MutateRowsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> rest_streaming.ResponseIterator: - r"""Call the mutate rows method over HTTP. - - Args: - request (~.bigtable.MutateRowsRequest): - The request object. Request message for - BigtableService.MutateRows. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable.MutateRowsResponse: - Response message for - BigtableService.MutateRows. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_mutate_rows(request, metadata) - pb_request = bigtable.MutateRowsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = rest_streaming.ResponseIterator(response, bigtable.MutateRowsResponse) - resp = self._interceptor.post_mutate_rows(resp) - return resp - - class _PingAndWarm(BigtableRestStub): - def __hash__(self): - return hash("PingAndWarm") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable.PingAndWarmRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable.PingAndWarmResponse: - r"""Call the ping and warm method over HTTP. - - Args: - request (~.bigtable.PingAndWarmRequest): - The request object. Request message for client connection - keep-alive and warming. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable.PingAndWarmResponse: - Response message for - Bigtable.PingAndWarm connection - keepalive and warming. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{name=projects/*/instances/*}:ping', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_ping_and_warm(request, metadata) - pb_request = bigtable.PingAndWarmRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable.PingAndWarmResponse() - pb_resp = bigtable.PingAndWarmResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_ping_and_warm(resp) - return resp - - class _ReadChangeStream(BigtableRestStub): - def __hash__(self): - return hash("ReadChangeStream") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable.ReadChangeStreamRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> rest_streaming.ResponseIterator: - r"""Call the read change stream method over HTTP. - - Args: - request (~.bigtable.ReadChangeStreamRequest): - The request object. NOTE: This API is intended to be used - by Apache Beam BigtableIO. Request - message for Bigtable.ReadChangeStream. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable.ReadChangeStreamResponse: - NOTE: This API is intended to be used - by Apache Beam BigtableIO. Response - message for Bigtable.ReadChangeStream. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_read_change_stream(request, metadata) - pb_request = bigtable.ReadChangeStreamRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = rest_streaming.ResponseIterator(response, bigtable.ReadChangeStreamResponse) - resp = self._interceptor.post_read_change_stream(resp) - return resp - - class _ReadModifyWriteRow(BigtableRestStub): - def __hash__(self): - return hash("ReadModifyWriteRow") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable.ReadModifyWriteRowRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable.ReadModifyWriteRowResponse: - r"""Call the read modify write row method over HTTP. - - Args: - request (~.bigtable.ReadModifyWriteRowRequest): - The request object. Request message for - Bigtable.ReadModifyWriteRow. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable.ReadModifyWriteRowResponse: - Response message for - Bigtable.ReadModifyWriteRow. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_read_modify_write_row(request, metadata) - pb_request = bigtable.ReadModifyWriteRowRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable.ReadModifyWriteRowResponse() - pb_resp = bigtable.ReadModifyWriteRowResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_read_modify_write_row(resp) - return resp - - class _ReadRows(BigtableRestStub): - def __hash__(self): - return hash("ReadRows") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable.ReadRowsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> rest_streaming.ResponseIterator: - r"""Call the read rows method over HTTP. - - Args: - request (~.bigtable.ReadRowsRequest): - The request object. Request message for - Bigtable.ReadRows. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable.ReadRowsResponse: - Response message for - Bigtable.ReadRows. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:readRows', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_read_rows(request, metadata) - pb_request = bigtable.ReadRowsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = rest_streaming.ResponseIterator(response, bigtable.ReadRowsResponse) - resp = self._interceptor.post_read_rows(resp) - return resp - - class _SampleRowKeys(BigtableRestStub): - def __hash__(self): - return hash("SampleRowKeys") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable.SampleRowKeysRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> rest_streaming.ResponseIterator: - r"""Call the sample row keys method over HTTP. - - Args: - request (~.bigtable.SampleRowKeysRequest): - The request object. Request message for - Bigtable.SampleRowKeys. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable.SampleRowKeysResponse: - Response message for - Bigtable.SampleRowKeys. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys', - }, - ] - request, metadata = self._interceptor.pre_sample_row_keys(request, metadata) - pb_request = bigtable.SampleRowKeysRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = rest_streaming.ResponseIterator(response, bigtable.SampleRowKeysResponse) - resp = self._interceptor.post_sample_row_keys(resp) - return resp - - @property - def check_and_mutate_row(self) -> Callable[ - [bigtable.CheckAndMutateRowRequest], - bigtable.CheckAndMutateRowResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._CheckAndMutateRow(self._session, self._host, self._interceptor) # type: ignore - - @property - def generate_initial_change_stream_partitions(self) -> Callable[ - [bigtable.GenerateInitialChangeStreamPartitionsRequest], - bigtable.GenerateInitialChangeStreamPartitionsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GenerateInitialChangeStreamPartitions(self._session, self._host, self._interceptor) # type: ignore - - @property - def mutate_row(self) -> Callable[ - [bigtable.MutateRowRequest], - bigtable.MutateRowResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._MutateRow(self._session, self._host, self._interceptor) # type: ignore - - @property - def mutate_rows(self) -> Callable[ - [bigtable.MutateRowsRequest], - bigtable.MutateRowsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._MutateRows(self._session, self._host, self._interceptor) # type: ignore - - @property - def ping_and_warm(self) -> Callable[ - [bigtable.PingAndWarmRequest], - bigtable.PingAndWarmResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._PingAndWarm(self._session, self._host, self._interceptor) # type: ignore - - @property - def read_change_stream(self) -> Callable[ - [bigtable.ReadChangeStreamRequest], - bigtable.ReadChangeStreamResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._ReadChangeStream(self._session, self._host, self._interceptor) # type: ignore - - @property - def read_modify_write_row(self) -> Callable[ - [bigtable.ReadModifyWriteRowRequest], - bigtable.ReadModifyWriteRowResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ReadModifyWriteRow(self._session, self._host, self._interceptor) # type: ignore - - @property - def read_rows(self) -> Callable[ - [bigtable.ReadRowsRequest], - bigtable.ReadRowsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ReadRows(self._session, self._host, self._interceptor) # type: ignore - - @property - def sample_row_keys(self) -> Callable[ - [bigtable.SampleRowKeysRequest], - bigtable.SampleRowKeysResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._SampleRowKeys(self._session, self._host, self._interceptor) # type: ignore - - @property - def kind(self) -> str: - return "rest" - - def close(self): - self._session.close() - - -__all__=( - 'BigtableRestTransport', -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/__init__.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/__init__.py deleted file mode 100644 index c618147af..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/__init__.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from .bigtable import ( - CheckAndMutateRowRequest, - CheckAndMutateRowResponse, - GenerateInitialChangeStreamPartitionsRequest, - GenerateInitialChangeStreamPartitionsResponse, - MutateRowRequest, - MutateRowResponse, - MutateRowsRequest, - MutateRowsResponse, - PingAndWarmRequest, - PingAndWarmResponse, - RateLimitInfo, - ReadChangeStreamRequest, - ReadChangeStreamResponse, - ReadModifyWriteRowRequest, - ReadModifyWriteRowResponse, - ReadRowsRequest, - ReadRowsResponse, - SampleRowKeysRequest, - SampleRowKeysResponse, -) -from .data import ( - Cell, - Column, - ColumnRange, - Family, - Mutation, - ReadModifyWriteRule, - Row, - RowFilter, - RowRange, - RowSet, - StreamContinuationToken, - StreamContinuationTokens, - StreamPartition, - TimestampRange, - ValueRange, -) -from .feature_flags import ( - FeatureFlags, -) -from .request_stats import ( - FullReadStatsView, - ReadIterationStats, - RequestLatencyStats, - RequestStats, -) -from .response_params import ( - ResponseParams, -) - -__all__ = ( - 'CheckAndMutateRowRequest', - 'CheckAndMutateRowResponse', - 'GenerateInitialChangeStreamPartitionsRequest', - 'GenerateInitialChangeStreamPartitionsResponse', - 'MutateRowRequest', - 'MutateRowResponse', - 'MutateRowsRequest', - 'MutateRowsResponse', - 'PingAndWarmRequest', - 'PingAndWarmResponse', - 'RateLimitInfo', - 'ReadChangeStreamRequest', - 'ReadChangeStreamResponse', - 'ReadModifyWriteRowRequest', - 'ReadModifyWriteRowResponse', - 'ReadRowsRequest', - 'ReadRowsResponse', - 'SampleRowKeysRequest', - 'SampleRowKeysResponse', - 'Cell', - 'Column', - 
'ColumnRange', - 'Family', - 'Mutation', - 'ReadModifyWriteRule', - 'Row', - 'RowFilter', - 'RowRange', - 'RowSet', - 'StreamContinuationToken', - 'StreamContinuationTokens', - 'StreamPartition', - 'TimestampRange', - 'ValueRange', - 'FeatureFlags', - 'FullReadStatsView', - 'ReadIterationStats', - 'RequestLatencyStats', - 'RequestStats', - 'ResponseParams', -) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/bigtable.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/bigtable.py deleted file mode 100644 index d0807b46a..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/bigtable.py +++ /dev/null @@ -1,1186 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.bigtable_v2.types import data -from google.cloud.bigtable_v2.types import request_stats as gb_request_stats -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.protobuf import wrappers_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.v2', - manifest={ - 'ReadRowsRequest', - 'ReadRowsResponse', - 'SampleRowKeysRequest', - 'SampleRowKeysResponse', - 'MutateRowRequest', - 'MutateRowResponse', - 'MutateRowsRequest', - 'MutateRowsResponse', - 'RateLimitInfo', - 'CheckAndMutateRowRequest', - 'CheckAndMutateRowResponse', - 'PingAndWarmRequest', - 'PingAndWarmResponse', - 'ReadModifyWriteRowRequest', - 'ReadModifyWriteRowResponse', - 'GenerateInitialChangeStreamPartitionsRequest', - 'GenerateInitialChangeStreamPartitionsResponse', - 'ReadChangeStreamRequest', - 'ReadChangeStreamResponse', - }, -) - - -class ReadRowsRequest(proto.Message): - r"""Request message for Bigtable.ReadRows. - - Attributes: - table_name (str): - Required. The unique name of the table from which to read. - Values are of the form - ``projects//instances//tables/
``. - app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. - rows (google.cloud.bigtable_v2.types.RowSet): - The row keys and/or ranges to read - sequentially. If not specified, reads from all - rows. - filter (google.cloud.bigtable_v2.types.RowFilter): - The filter to apply to the contents of the - specified row(s). If unset, reads the entirety - of each row. - rows_limit (int): - The read will stop after committing to N - rows' worth of results. The default (zero) is to - return all results. - request_stats_view (google.cloud.bigtable_v2.types.ReadRowsRequest.RequestStatsView): - The view into RequestStats, as described - above. - reversed (bool): - Experimental API - Please note that this API is currently - experimental and can change in the future. - - Return rows in lexiographical descending order of the row - keys. The row contents will not be affected by this flag. - - Example result set: - - :: - - [ - {key: "k2", "f:col1": "v1", "f:col2": "v1"}, - {key: "k1", "f:col1": "v2", "f:col2": "v2"} - ] - """ - class RequestStatsView(proto.Enum): - r"""The desired view into RequestStats that should be returned in - the response. - See also: RequestStats message. - - Values: - REQUEST_STATS_VIEW_UNSPECIFIED (0): - The default / unset value. The API will - default to the NONE option below. - REQUEST_STATS_NONE (1): - Do not include any RequestStats in the - response. This will leave the RequestStats - embedded message unset in the response. - REQUEST_STATS_FULL (2): - Include the full set of available - RequestStats in the response, applicable to this - read. 
- """ - REQUEST_STATS_VIEW_UNSPECIFIED = 0 - REQUEST_STATS_NONE = 1 - REQUEST_STATS_FULL = 2 - - table_name: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=5, - ) - rows: data.RowSet = proto.Field( - proto.MESSAGE, - number=2, - message=data.RowSet, - ) - filter: data.RowFilter = proto.Field( - proto.MESSAGE, - number=3, - message=data.RowFilter, - ) - rows_limit: int = proto.Field( - proto.INT64, - number=4, - ) - request_stats_view: RequestStatsView = proto.Field( - proto.ENUM, - number=6, - enum=RequestStatsView, - ) - reversed: bool = proto.Field( - proto.BOOL, - number=7, - ) - - -class ReadRowsResponse(proto.Message): - r"""Response message for Bigtable.ReadRows. - - Attributes: - chunks (MutableSequence[google.cloud.bigtable_v2.types.ReadRowsResponse.CellChunk]): - A collection of a row's contents as part of - the read request. - last_scanned_row_key (bytes): - Optionally the server might return the row - key of the last row it has scanned. The client - can use this to construct a more efficient retry - request if needed: any row keys or portions of - ranges less than this row key can be dropped - from the request. This is primarily useful for - cases where the server has read a lot of data - that was filtered out since the last committed - row key, allowing the client to skip that work - on a retry. - request_stats (google.cloud.bigtable_v2.types.RequestStats): - If requested, provide enhanced query performance statistics. - The semantics dictate: - - - request_stats is empty on every (streamed) response, - except - - request_stats has non-empty information after all chunks - have been streamed, where the ReadRowsResponse message - only contains request_stats. - - - For example, if a read request would have returned an - empty response instead a single ReadRowsResponse is - streamed with empty chunks and request_stats filled. - - Visually, response messages will stream as follows: ... 
-> - {chunks: [...]} -> {chunks: [], request_stats: {...}} - \_\ **/ \_**\ \__________/ Primary response Trailer of - RequestStats info - - Or if the read did not return any values: {chunks: [], - request_stats: {...}} \________________________________/ - Trailer of RequestStats info - """ - - class CellChunk(proto.Message): - r"""Specifies a piece of a row's contents returned as part of the - read response stream. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - row_key (bytes): - The row key for this chunk of data. If the - row key is empty, this CellChunk is a - continuation of the same row as the previous - CellChunk in the response stream, even if that - CellChunk was in a previous ReadRowsResponse - message. - family_name (google.protobuf.wrappers_pb2.StringValue): - The column family name for this chunk of data. If this - message is not present this CellChunk is a continuation of - the same column family as the previous CellChunk. The empty - string can occur as a column family name in a response so - clients must check explicitly for the presence of this - message, not just for ``family_name.value`` being non-empty. - qualifier (google.protobuf.wrappers_pb2.BytesValue): - The column qualifier for this chunk of data. If this message - is not present, this CellChunk is a continuation of the same - column as the previous CellChunk. Column qualifiers may be - empty so clients must check for the presence of this - message, not just for ``qualifier.value`` being non-empty. - timestamp_micros (int): - The cell's stored timestamp, which also uniquely identifies - it within its column. 
Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity - will only allow values of ``timestamp_micros`` which are - multiples of 1000. Timestamps are only set in the first - CellChunk per cell (for cells split into multiple chunks). - labels (MutableSequence[str]): - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. Labels are only - set on the first CellChunk per cell. - value (bytes): - The value stored in the cell. Cell values - can be split across multiple CellChunks. In - that case only the value field will be set in - CellChunks after the first: the timestamp and - labels will only be present in the first - CellChunk, even if the first CellChunk came in a - previous ReadRowsResponse. - value_size (int): - If this CellChunk is part of a chunked cell value and this - is not the final chunk of that cell, value_size will be set - to the total length of the cell value. The client can use - this size to pre-allocate memory to hold the full cell - value. - reset_row (bool): - Indicates that the client should drop all previous chunks - for ``row_key``, as it will be re-read from the beginning. - - This field is a member of `oneof`_ ``row_status``. - commit_row (bool): - Indicates that the client can safely process all previous - chunks for ``row_key``, as its data has been fully read. - - This field is a member of `oneof`_ ``row_status``. 
- """ - - row_key: bytes = proto.Field( - proto.BYTES, - number=1, - ) - family_name: wrappers_pb2.StringValue = proto.Field( - proto.MESSAGE, - number=2, - message=wrappers_pb2.StringValue, - ) - qualifier: wrappers_pb2.BytesValue = proto.Field( - proto.MESSAGE, - number=3, - message=wrappers_pb2.BytesValue, - ) - timestamp_micros: int = proto.Field( - proto.INT64, - number=4, - ) - labels: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=5, - ) - value: bytes = proto.Field( - proto.BYTES, - number=6, - ) - value_size: int = proto.Field( - proto.INT32, - number=7, - ) - reset_row: bool = proto.Field( - proto.BOOL, - number=8, - oneof='row_status', - ) - commit_row: bool = proto.Field( - proto.BOOL, - number=9, - oneof='row_status', - ) - - chunks: MutableSequence[CellChunk] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=CellChunk, - ) - last_scanned_row_key: bytes = proto.Field( - proto.BYTES, - number=2, - ) - request_stats: gb_request_stats.RequestStats = proto.Field( - proto.MESSAGE, - number=3, - message=gb_request_stats.RequestStats, - ) - - -class SampleRowKeysRequest(proto.Message): - r"""Request message for Bigtable.SampleRowKeys. - - Attributes: - table_name (str): - Required. The unique name of the table from which to sample - row keys. Values are of the form - ``projects//instances//tables/
``. - app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. - """ - - table_name: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=2, - ) - - -class SampleRowKeysResponse(proto.Message): - r"""Response message for Bigtable.SampleRowKeys. - - Attributes: - row_key (bytes): - Sorted streamed sequence of sample row keys - in the table. The table might have contents - before the first row key in the list and after - the last one, but a key containing the empty - string indicates "end of table" and will be the - last response given, if present. - Note that row keys in this list may not have - ever been written to or read from, and users - should therefore not make any assumptions about - the row key structure that are specific to their - use case. - offset_bytes (int): - Approximate total storage space used by all rows in the - table which precede ``row_key``. Buffering the contents of - all rows between two subsequent samples would require space - roughly equal to the difference in their ``offset_bytes`` - fields. - """ - - row_key: bytes = proto.Field( - proto.BYTES, - number=1, - ) - offset_bytes: int = proto.Field( - proto.INT64, - number=2, - ) - - -class MutateRowRequest(proto.Message): - r"""Request message for Bigtable.MutateRow. - - Attributes: - table_name (str): - Required. The unique name of the table to which the mutation - should be applied. Values are of the form - ``projects//instances//tables/
``. - app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. - row_key (bytes): - Required. The key of the row to which the - mutation should be applied. - mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): - Required. Changes to be atomically applied to - the specified row. Entries are applied in order, - meaning that earlier mutations can be masked by - later ones. Must contain at least one entry and - at most 100000. - """ - - table_name: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=4, - ) - row_key: bytes = proto.Field( - proto.BYTES, - number=2, - ) - mutations: MutableSequence[data.Mutation] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=data.Mutation, - ) - - -class MutateRowResponse(proto.Message): - r"""Response message for Bigtable.MutateRow. - """ - - -class MutateRowsRequest(proto.Message): - r"""Request message for BigtableService.MutateRows. - - Attributes: - table_name (str): - Required. The unique name of the table to - which the mutations should be applied. - app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. - entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsRequest.Entry]): - Required. The row keys and corresponding - mutations to be applied in bulk. Each entry is - applied as an atomic mutation, but the entries - may be applied in arbitrary order (even between - entries for the same row). At least one entry - must be specified, and in total the entries can - contain at most 100000 mutations. - """ - - class Entry(proto.Message): - r"""A mutation for a given row. - - Attributes: - row_key (bytes): - The key of the row to which the ``mutations`` should be - applied. - mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): - Required. 
Changes to be atomically applied to - the specified row. Mutations are applied in - order, meaning that earlier mutations can be - masked by later ones. You must specify at least - one mutation. - """ - - row_key: bytes = proto.Field( - proto.BYTES, - number=1, - ) - mutations: MutableSequence[data.Mutation] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=data.Mutation, - ) - - table_name: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=3, - ) - entries: MutableSequence[Entry] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=Entry, - ) - - -class MutateRowsResponse(proto.Message): - r"""Response message for BigtableService.MutateRows. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - entries (MutableSequence[google.cloud.bigtable_v2.types.MutateRowsResponse.Entry]): - One or more results for Entries from the - batch request. - rate_limit_info (google.cloud.bigtable_v2.types.RateLimitInfo): - Information about how client should limit the - rate (QPS). Primirily used by supported official - Cloud Bigtable clients. If unset, the rate limit - info is not provided by the server. - - This field is a member of `oneof`_ ``_rate_limit_info``. - """ - - class Entry(proto.Message): - r"""The result of applying a passed mutation in the original - request. - - Attributes: - index (int): - The index into the original request's ``entries`` list of - the Entry for which a result is being reported. - status (google.rpc.status_pb2.Status): - The result of the request Entry identified by ``index``. - Depending on how requests are batched during execution, it - is possible for one Entry to fail due to an error with - another Entry. In the event that this occurs, the same error - will be reported for both entries. 
- """ - - index: int = proto.Field( - proto.INT64, - number=1, - ) - status: status_pb2.Status = proto.Field( - proto.MESSAGE, - number=2, - message=status_pb2.Status, - ) - - entries: MutableSequence[Entry] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=Entry, - ) - rate_limit_info: 'RateLimitInfo' = proto.Field( - proto.MESSAGE, - number=3, - optional=True, - message='RateLimitInfo', - ) - - -class RateLimitInfo(proto.Message): - r"""Information about how client should adjust the load to - Bigtable. - - Attributes: - period (google.protobuf.duration_pb2.Duration): - Time that clients should wait before - adjusting the target rate again. If clients - adjust rate too frequently, the impact of the - previous adjustment may not have been taken into - account and may over-throttle or under-throttle. - If clients adjust rate too slowly, they will not - be responsive to load changes on server side, - and may over-throttle or under-throttle. - factor (float): - If it has been at least one ``period`` since the last load - adjustment, the client should multiply the current load by - this value to get the new target load. For example, if the - current load is 100 and ``factor`` is 0.8, the new target - load should be 80. After adjusting, the client should ignore - ``factor`` until another ``period`` has passed. - - The client can measure its load using any unit that's - comparable over time For example, QPS can be used as long as - each request involves a similar amount of work. - """ - - period: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - factor: float = proto.Field( - proto.DOUBLE, - number=2, - ) - - -class CheckAndMutateRowRequest(proto.Message): - r"""Request message for Bigtable.CheckAndMutateRow. - - Attributes: - table_name (str): - Required. The unique name of the table to which the - conditional mutation should be applied. Values are of the - form - ``projects//instances//tables/
``. - app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. - row_key (bytes): - Required. The key of the row to which the - conditional mutation should be applied. - predicate_filter (google.cloud.bigtable_v2.types.RowFilter): - The filter to be applied to the contents of the specified - row. Depending on whether or not any results are yielded, - either ``true_mutations`` or ``false_mutations`` will be - executed. If unset, checks that the row contains any values - at all. - true_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): - Changes to be atomically applied to the specified row if - ``predicate_filter`` yields at least one cell when applied - to ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain - at least one entry if ``false_mutations`` is empty, and at - most 100000. - false_mutations (MutableSequence[google.cloud.bigtable_v2.types.Mutation]): - Changes to be atomically applied to the specified row if - ``predicate_filter`` does not yield any cells when applied - to ``row_key``. Entries are applied in order, meaning that - earlier mutations can be masked by later ones. Must contain - at least one entry if ``true_mutations`` is empty, and at - most 100000. 
- """ - - table_name: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=7, - ) - row_key: bytes = proto.Field( - proto.BYTES, - number=2, - ) - predicate_filter: data.RowFilter = proto.Field( - proto.MESSAGE, - number=6, - message=data.RowFilter, - ) - true_mutations: MutableSequence[data.Mutation] = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=data.Mutation, - ) - false_mutations: MutableSequence[data.Mutation] = proto.RepeatedField( - proto.MESSAGE, - number=5, - message=data.Mutation, - ) - - -class CheckAndMutateRowResponse(proto.Message): - r"""Response message for Bigtable.CheckAndMutateRow. - - Attributes: - predicate_matched (bool): - Whether or not the request's ``predicate_filter`` yielded - any results for the specified row. - """ - - predicate_matched: bool = proto.Field( - proto.BOOL, - number=1, - ) - - -class PingAndWarmRequest(proto.Message): - r"""Request message for client connection keep-alive and warming. - - Attributes: - name (str): - Required. The unique name of the instance to check - permissions for as well as respond. Values are of the form - ``projects//instances/``. - app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=2, - ) - - -class PingAndWarmResponse(proto.Message): - r"""Response message for Bigtable.PingAndWarm connection - keepalive and warming. - - """ - - -class ReadModifyWriteRowRequest(proto.Message): - r"""Request message for Bigtable.ReadModifyWriteRow. - - Attributes: - table_name (str): - Required. The unique name of the table to which the - read/modify/write rules should be applied. Values are of the - form - ``projects//instances//tables/
``. - app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. - row_key (bytes): - Required. The key of the row to which the - read/modify/write rules should be applied. - rules (MutableSequence[google.cloud.bigtable_v2.types.ReadModifyWriteRule]): - Required. Rules specifying how the specified - row's contents are to be transformed into - writes. Entries are applied in order, meaning - that earlier rules will affect the results of - later ones. - """ - - table_name: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=4, - ) - row_key: bytes = proto.Field( - proto.BYTES, - number=2, - ) - rules: MutableSequence[data.ReadModifyWriteRule] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=data.ReadModifyWriteRule, - ) - - -class ReadModifyWriteRowResponse(proto.Message): - r"""Response message for Bigtable.ReadModifyWriteRow. - - Attributes: - row (google.cloud.bigtable_v2.types.Row): - A Row containing the new contents of all - cells modified by the request. - """ - - row: data.Row = proto.Field( - proto.MESSAGE, - number=1, - message=data.Row, - ) - - -class GenerateInitialChangeStreamPartitionsRequest(proto.Message): - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Request message for - Bigtable.GenerateInitialChangeStreamPartitions. - - Attributes: - table_name (str): - Required. The unique name of the table from which to get - change stream partitions. Values are of the form - ``projects//instances//tables/
``. - Change streaming must be enabled on the table. - app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. Single cluster routing - must be configured on the profile. - """ - - table_name: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=2, - ) - - -class GenerateInitialChangeStreamPartitionsResponse(proto.Message): - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Response message for - Bigtable.GenerateInitialChangeStreamPartitions. - - Attributes: - partition (google.cloud.bigtable_v2.types.StreamPartition): - A partition of the change stream. - """ - - partition: data.StreamPartition = proto.Field( - proto.MESSAGE, - number=1, - message=data.StreamPartition, - ) - - -class ReadChangeStreamRequest(proto.Message): - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Request message for Bigtable.ReadChangeStream. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - table_name (str): - Required. The unique name of the table from which to read a - change stream. Values are of the form - ``projects//instances//tables/
``. - Change streaming must be enabled on the table. - app_profile_id (str): - This value specifies routing for replication. - If not specified, the "default" application - profile will be used. Single cluster routing - must be configured on the profile. - partition (google.cloud.bigtable_v2.types.StreamPartition): - The partition to read changes from. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Start reading the stream at the specified - timestamp. This timestamp must be within the - change stream retention period, less than or - equal to the current time, and after change - stream creation, whichever is greater. This - value is inclusive and will be truncated to - microsecond granularity. - - This field is a member of `oneof`_ ``start_from``. - continuation_tokens (google.cloud.bigtable_v2.types.StreamContinuationTokens): - Tokens that describe how to resume reading a stream where - reading previously left off. If specified, changes will be - read starting at the the position. Tokens are delivered on - the stream as part of ``Heartbeat`` and ``CloseStream`` - messages. - - If a single token is provided, the token’s partition must - exactly match the request’s partition. If multiple tokens - are provided, as in the case of a partition merge, the union - of the token partitions must exactly cover the request’s - partition. Otherwise, INVALID_ARGUMENT will be returned. - - This field is a member of `oneof`_ ``start_from``. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If specified, OK will be returned when the - stream advances beyond this time. Otherwise, - changes will be continuously delivered on the - stream. This value is inclusive and will be - truncated to microsecond granularity. - heartbeat_duration (google.protobuf.duration_pb2.Duration): - If specified, the duration between ``Heartbeat`` messages on - the stream. Otherwise, defaults to 5 seconds. 
- """ - - table_name: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=2, - ) - partition: data.StreamPartition = proto.Field( - proto.MESSAGE, - number=3, - message=data.StreamPartition, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - oneof='start_from', - message=timestamp_pb2.Timestamp, - ) - continuation_tokens: data.StreamContinuationTokens = proto.Field( - proto.MESSAGE, - number=6, - oneof='start_from', - message=data.StreamContinuationTokens, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - heartbeat_duration: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=7, - message=duration_pb2.Duration, - ) - - -class ReadChangeStreamResponse(proto.Message): - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. Response message for Bigtable.ReadChangeStream. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - data_change (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.DataChange): - A mutation to the partition. - - This field is a member of `oneof`_ ``stream_record``. - heartbeat (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.Heartbeat): - A periodic heartbeat message. - - This field is a member of `oneof`_ ``stream_record``. - close_stream (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.CloseStream): - An indication that the stream should be - closed. - - This field is a member of `oneof`_ ``stream_record``. - """ - - class MutationChunk(proto.Message): - r"""A partial or complete mutation. 
- - Attributes: - chunk_info (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.MutationChunk.ChunkInfo): - If set, then the mutation is a ``SetCell`` with a chunked - value across multiple messages. - mutation (google.cloud.bigtable_v2.types.Mutation): - If this is a continuation of a chunked message - (``chunked_value_offset`` > 0), ignore all fields except the - ``SetCell``'s value and merge it with the previous message - by concatenating the value fields. - """ - - class ChunkInfo(proto.Message): - r"""Information about the chunking of this mutation. Only ``SetCell`` - mutations can be chunked, and all chunks for a ``SetCell`` will be - delivered contiguously with no other mutation types interleaved. - - Attributes: - chunked_value_size (int): - The total value size of all the chunks that make up the - ``SetCell``. - chunked_value_offset (int): - The byte offset of this chunk into the total - value size of the mutation. - last_chunk (bool): - When true, this is the last chunk of a chunked ``SetCell``. - """ - - chunked_value_size: int = proto.Field( - proto.INT32, - number=1, - ) - chunked_value_offset: int = proto.Field( - proto.INT32, - number=2, - ) - last_chunk: bool = proto.Field( - proto.BOOL, - number=3, - ) - - chunk_info: 'ReadChangeStreamResponse.MutationChunk.ChunkInfo' = proto.Field( - proto.MESSAGE, - number=1, - message='ReadChangeStreamResponse.MutationChunk.ChunkInfo', - ) - mutation: data.Mutation = proto.Field( - proto.MESSAGE, - number=2, - message=data.Mutation, - ) - - class DataChange(proto.Message): - r"""A message corresponding to one or more mutations to the partition - being streamed. A single logical ``DataChange`` message may also be - split across a sequence of multiple individual messages. Messages - other than the first in a sequence will only have the ``type`` and - ``chunks`` fields populated, with the final message in the sequence - also containing ``done`` set to true. 
- - Attributes: - type_ (google.cloud.bigtable_v2.types.ReadChangeStreamResponse.DataChange.Type): - The type of the mutation. - source_cluster_id (str): - The cluster where the mutation was applied. Not set when - ``type`` is ``GARBAGE_COLLECTION``. - row_key (bytes): - The row key for all mutations that are part of this - ``DataChange``. If the ``DataChange`` is chunked across - multiple messages, then this field will only be set for the - first message. - commit_timestamp (google.protobuf.timestamp_pb2.Timestamp): - The timestamp at which the mutation was - applied on the Bigtable server. - tiebreaker (int): - A value that lets stream consumers reconstruct Bigtable's - conflict resolution semantics. - https://cloud.google.com/bigtable/docs/writes#conflict-resolution - In the event that the same row key, column family, column - qualifier, timestamp are modified on different clusters at - the same ``commit_timestamp``, the mutation with the larger - ``tiebreaker`` will be the one chosen for the eventually - consistent state of the system. - chunks (MutableSequence[google.cloud.bigtable_v2.types.ReadChangeStreamResponse.MutationChunk]): - The mutations associated with this change to the partition. - May contain complete mutations or chunks of a multi-message - chunked ``DataChange`` record. - done (bool): - When true, indicates that the entire ``DataChange`` has been - read and the client can safely process the message. - token (str): - An encoded position for this stream's - partition to restart reading from. This token is - for the StreamPartition from the request. - estimated_low_watermark (google.protobuf.timestamp_pb2.Timestamp): - An estimate of the commit timestamp that is - usually lower than or equal to any timestamp for - a record that will be delivered in the future on - the stream. It is possible that, under - particular circumstances that a future record - has a timestamp is is lower than a previously - seen timestamp. 
For an example usage see - https://beam.apache.org/documentation/basics/#watermarks - """ - class Type(proto.Enum): - r"""The type of mutation. - - Values: - TYPE_UNSPECIFIED (0): - The type is unspecified. - USER (1): - A user-initiated mutation. - GARBAGE_COLLECTION (2): - A system-initiated mutation as part of - garbage collection. - https://cloud.google.com/bigtable/docs/garbage-collection - CONTINUATION (3): - This is a continuation of a multi-message - change. - """ - TYPE_UNSPECIFIED = 0 - USER = 1 - GARBAGE_COLLECTION = 2 - CONTINUATION = 3 - - type_: 'ReadChangeStreamResponse.DataChange.Type' = proto.Field( - proto.ENUM, - number=1, - enum='ReadChangeStreamResponse.DataChange.Type', - ) - source_cluster_id: str = proto.Field( - proto.STRING, - number=2, - ) - row_key: bytes = proto.Field( - proto.BYTES, - number=3, - ) - commit_timestamp: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - tiebreaker: int = proto.Field( - proto.INT32, - number=5, - ) - chunks: MutableSequence['ReadChangeStreamResponse.MutationChunk'] = proto.RepeatedField( - proto.MESSAGE, - number=6, - message='ReadChangeStreamResponse.MutationChunk', - ) - done: bool = proto.Field( - proto.BOOL, - number=8, - ) - token: str = proto.Field( - proto.STRING, - number=9, - ) - estimated_low_watermark: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=10, - message=timestamp_pb2.Timestamp, - ) - - class Heartbeat(proto.Message): - r"""A periodic message with information that can be used to - checkpoint the state of a stream. - - Attributes: - continuation_token (google.cloud.bigtable_v2.types.StreamContinuationToken): - A token that can be provided to a subsequent - ``ReadChangeStream`` call to pick up reading at the current - stream position. 
- estimated_low_watermark (google.protobuf.timestamp_pb2.Timestamp): - An estimate of the commit timestamp that is - usually lower than or equal to any timestamp for - a record that will be delivered in the future on - the stream. It is possible that, under - particular circumstances that a future record - has a timestamp is is lower than a previously - seen timestamp. For an example usage see - https://beam.apache.org/documentation/basics/#watermarks - """ - - continuation_token: data.StreamContinuationToken = proto.Field( - proto.MESSAGE, - number=1, - message=data.StreamContinuationToken, - ) - estimated_low_watermark: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - - class CloseStream(proto.Message): - r"""A message indicating that the client should stop reading from the - stream. If status is OK and ``continuation_tokens`` & - ``new_partitions`` are empty, the stream has finished (for example - if there was an ``end_time`` specified). If ``continuation_tokens`` - & ``new_partitions`` are present, then a change in partitioning - requires the client to open a new stream for each token to resume - reading. Example: [B, D) ends \| v new_partitions: [A, C) [C, E) - continuation_tokens.partitions: [B,C) [C,D) ^---^ ^---^ ^ ^ \| \| \| - StreamContinuationToken 2 \| StreamContinuationToken 1 To read the - new partition [A,C), supply the continuation tokens whose ranges - cover the new partition, for example ContinuationToken[A,B) & - ContinuationToken[B,C). - - Attributes: - status (google.rpc.status_pb2.Status): - The status of the stream. - continuation_tokens (MutableSequence[google.cloud.bigtable_v2.types.StreamContinuationToken]): - If non-empty, contains the information needed - to resume reading their associated partitions. 
- new_partitions (MutableSequence[google.cloud.bigtable_v2.types.StreamPartition]): - If non-empty, contains the new partitions to start reading - from, which are related to but not necessarily identical to - the partitions for the above ``continuation_tokens``. - """ - - status: status_pb2.Status = proto.Field( - proto.MESSAGE, - number=1, - message=status_pb2.Status, - ) - continuation_tokens: MutableSequence[data.StreamContinuationToken] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=data.StreamContinuationToken, - ) - new_partitions: MutableSequence[data.StreamPartition] = proto.RepeatedField( - proto.MESSAGE, - number=3, - message=data.StreamPartition, - ) - - data_change: DataChange = proto.Field( - proto.MESSAGE, - number=1, - oneof='stream_record', - message=DataChange, - ) - heartbeat: Heartbeat = proto.Field( - proto.MESSAGE, - number=2, - oneof='stream_record', - message=Heartbeat, - ) - close_stream: CloseStream = proto.Field( - proto.MESSAGE, - number=3, - oneof='stream_record', - message=CloseStream, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/data.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/data.py deleted file mode 100644 index 0211a971e..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/data.py +++ /dev/null @@ -1,1102 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.v2', - manifest={ - 'Row', - 'Family', - 'Column', - 'Cell', - 'RowRange', - 'RowSet', - 'ColumnRange', - 'TimestampRange', - 'ValueRange', - 'RowFilter', - 'Mutation', - 'ReadModifyWriteRule', - 'StreamPartition', - 'StreamContinuationTokens', - 'StreamContinuationToken', - }, -) - - -class Row(proto.Message): - r"""Specifies the complete (requested) contents of a single row - of a table. Rows which exceed 256MiB in size cannot be read in - full. - - Attributes: - key (bytes): - The unique key which identifies this row - within its table. This is the same key that's - used to identify the row in, for example, a - MutateRowRequest. May contain any non-empty byte - string up to 4KiB in length. - families (MutableSequence[google.cloud.bigtable_v2.types.Family]): - May be empty, but only if the entire row is - empty. The mutual ordering of column families is - not specified. - """ - - key: bytes = proto.Field( - proto.BYTES, - number=1, - ) - families: MutableSequence['Family'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='Family', - ) - - -class Family(proto.Message): - r"""Specifies (some of) the contents of a single row/column - family intersection of a table. - - Attributes: - name (str): - The unique key which identifies this family within its row. - This is the same key that's used to identify the family in, - for example, a RowFilter which sets its - "family_name_regex_filter" field. Must match - ``[-_.a-zA-Z0-9]+``, except that AggregatingRowProcessors - may produce cells in a sentinel family with an empty name. - Must be no greater than 64 characters in length. - columns (MutableSequence[google.cloud.bigtable_v2.types.Column]): - Must not be empty. 
Sorted in order of - increasing "qualifier". - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - columns: MutableSequence['Column'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='Column', - ) - - -class Column(proto.Message): - r"""Specifies (some of) the contents of a single row/column - intersection of a table. - - Attributes: - qualifier (bytes): - The unique key which identifies this column within its - family. This is the same key that's used to identify the - column in, for example, a RowFilter which sets its - ``column_qualifier_regex_filter`` field. May contain any - byte string, including the empty string, up to 16kiB in - length. - cells (MutableSequence[google.cloud.bigtable_v2.types.Cell]): - Must not be empty. Sorted in order of decreasing - "timestamp_micros". - """ - - qualifier: bytes = proto.Field( - proto.BYTES, - number=1, - ) - cells: MutableSequence['Cell'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='Cell', - ) - - -class Cell(proto.Message): - r"""Specifies (some of) the contents of a single - row/column/timestamp of a table. - - Attributes: - timestamp_micros (int): - The cell's stored timestamp, which also uniquely identifies - it within its column. Values are always expressed in - microseconds, but individual tables may set a coarser - granularity to further restrict the allowed values. For - example, a table which specifies millisecond granularity - will only allow values of ``timestamp_micros`` which are - multiples of 1000. - value (bytes): - The value stored in the cell. - May contain any byte string, including the empty - string, up to 100MiB in length. - labels (MutableSequence[str]): - Labels applied to the cell by a - [RowFilter][google.bigtable.v2.RowFilter]. 
- """ - - timestamp_micros: int = proto.Field( - proto.INT64, - number=1, - ) - value: bytes = proto.Field( - proto.BYTES, - number=2, - ) - labels: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class RowRange(proto.Message): - r"""Specifies a contiguous range of rows. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - start_key_closed (bytes): - Used when giving an inclusive lower bound for - the range. - - This field is a member of `oneof`_ ``start_key``. - start_key_open (bytes): - Used when giving an exclusive lower bound for - the range. - - This field is a member of `oneof`_ ``start_key``. - end_key_open (bytes): - Used when giving an exclusive upper bound for - the range. - - This field is a member of `oneof`_ ``end_key``. - end_key_closed (bytes): - Used when giving an inclusive upper bound for - the range. - - This field is a member of `oneof`_ ``end_key``. - """ - - start_key_closed: bytes = proto.Field( - proto.BYTES, - number=1, - oneof='start_key', - ) - start_key_open: bytes = proto.Field( - proto.BYTES, - number=2, - oneof='start_key', - ) - end_key_open: bytes = proto.Field( - proto.BYTES, - number=3, - oneof='end_key', - ) - end_key_closed: bytes = proto.Field( - proto.BYTES, - number=4, - oneof='end_key', - ) - - -class RowSet(proto.Message): - r"""Specifies a non-contiguous set of rows. - - Attributes: - row_keys (MutableSequence[bytes]): - Single rows included in the set. - row_ranges (MutableSequence[google.cloud.bigtable_v2.types.RowRange]): - Contiguous row ranges included in the set. 
- """ - - row_keys: MutableSequence[bytes] = proto.RepeatedField( - proto.BYTES, - number=1, - ) - row_ranges: MutableSequence['RowRange'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='RowRange', - ) - - -class ColumnRange(proto.Message): - r"""Specifies a contiguous range of columns within a single column - family. The range spans from : to - :, where both bounds can be either - inclusive or exclusive. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - family_name (str): - The name of the column family within which - this range falls. - start_qualifier_closed (bytes): - Used when giving an inclusive lower bound for - the range. - - This field is a member of `oneof`_ ``start_qualifier``. - start_qualifier_open (bytes): - Used when giving an exclusive lower bound for - the range. - - This field is a member of `oneof`_ ``start_qualifier``. - end_qualifier_closed (bytes): - Used when giving an inclusive upper bound for - the range. - - This field is a member of `oneof`_ ``end_qualifier``. - end_qualifier_open (bytes): - Used when giving an exclusive upper bound for - the range. - - This field is a member of `oneof`_ ``end_qualifier``. 
- """ - - family_name: str = proto.Field( - proto.STRING, - number=1, - ) - start_qualifier_closed: bytes = proto.Field( - proto.BYTES, - number=2, - oneof='start_qualifier', - ) - start_qualifier_open: bytes = proto.Field( - proto.BYTES, - number=3, - oneof='start_qualifier', - ) - end_qualifier_closed: bytes = proto.Field( - proto.BYTES, - number=4, - oneof='end_qualifier', - ) - end_qualifier_open: bytes = proto.Field( - proto.BYTES, - number=5, - oneof='end_qualifier', - ) - - -class TimestampRange(proto.Message): - r"""Specified a contiguous range of microsecond timestamps. - - Attributes: - start_timestamp_micros (int): - Inclusive lower bound. If left empty, - interpreted as 0. - end_timestamp_micros (int): - Exclusive upper bound. If left empty, - interpreted as infinity. - """ - - start_timestamp_micros: int = proto.Field( - proto.INT64, - number=1, - ) - end_timestamp_micros: int = proto.Field( - proto.INT64, - number=2, - ) - - -class ValueRange(proto.Message): - r"""Specifies a contiguous range of raw byte values. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - start_value_closed (bytes): - Used when giving an inclusive lower bound for - the range. - - This field is a member of `oneof`_ ``start_value``. - start_value_open (bytes): - Used when giving an exclusive lower bound for - the range. - - This field is a member of `oneof`_ ``start_value``. - end_value_closed (bytes): - Used when giving an inclusive upper bound for - the range. - - This field is a member of `oneof`_ ``end_value``. - end_value_open (bytes): - Used when giving an exclusive upper bound for - the range. - - This field is a member of `oneof`_ ``end_value``. 
- """ - - start_value_closed: bytes = proto.Field( - proto.BYTES, - number=1, - oneof='start_value', - ) - start_value_open: bytes = proto.Field( - proto.BYTES, - number=2, - oneof='start_value', - ) - end_value_closed: bytes = proto.Field( - proto.BYTES, - number=3, - oneof='end_value', - ) - end_value_open: bytes = proto.Field( - proto.BYTES, - number=4, - oneof='end_value', - ) - - -class RowFilter(proto.Message): - r"""Takes a row as input and produces an alternate view of the row based - on specified rules. For example, a RowFilter might trim down a row - to include just the cells from columns matching a given regular - expression, or might return all the cells of a row but not their - values. More complicated filters can be composed out of these - components to express requests such as, "within every column of a - particular family, give just the two most recent cells which are - older than timestamp X." - - There are two broad categories of RowFilters (true filters and - transformers), as well as two ways to compose simple filters into - more complex ones (chains and interleaves). They work as follows: - - - True filters alter the input row by excluding some of its cells - wholesale from the output row. An example of a true filter is the - ``value_regex_filter``, which excludes cells whose values don't - match the specified pattern. All regex true filters use RE2 - syntax (https://github.com/google/re2/wiki/Syntax) in raw byte - mode (RE2::Latin1), and are evaluated as full matches. An - important point to keep in mind is that ``RE2(.)`` is equivalent - by default to ``RE2([^\n])``, meaning that it does not match - newlines. When attempting to match an arbitrary byte, you should - therefore use the escape sequence ``\C``, which may need to be - further escaped as ``\\C`` in your client language. - - - Transformers alter the input row by changing the values of some - of its cells in the output, without excluding them completely. 
- Currently, the only supported transformer is the - ``strip_value_transformer``, which replaces every cell's value - with the empty string. - - - Chains and interleaves are described in more detail in the - RowFilter.Chain and RowFilter.Interleave documentation. - - The total serialized size of a RowFilter message must not exceed - 20480 bytes, and RowFilters may not be nested within each other (in - Chains or Interleaves) to a depth of more than 20. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - chain (google.cloud.bigtable_v2.types.RowFilter.Chain): - Applies several RowFilters to the data in - sequence, progressively narrowing the results. - - This field is a member of `oneof`_ ``filter``. - interleave (google.cloud.bigtable_v2.types.RowFilter.Interleave): - Applies several RowFilters to the data in - parallel and combines the results. - - This field is a member of `oneof`_ ``filter``. - condition (google.cloud.bigtable_v2.types.RowFilter.Condition): - Applies one of two possible RowFilters to the - data based on the output of a predicate - RowFilter. - - This field is a member of `oneof`_ ``filter``. - sink (bool): - ADVANCED USE ONLY. Hook for introspection into the - RowFilter. Outputs all cells directly to the output of the - read rather than to any parent filter. 
Consider the - following example: - - :: - - Chain( - FamilyRegex("A"), - Interleave( - All(), - Chain(Label("foo"), Sink()) - ), - QualifierRegex("B") - ) - - A,A,1,w - A,B,2,x - B,B,4,z - | - FamilyRegex("A") - | - A,A,1,w - A,B,2,x - | - +------------+-------------+ - | | - All() Label(foo) - | | - A,A,1,w A,A,1,w,labels:[foo] - A,B,2,x A,B,2,x,labels:[foo] - | | - | Sink() --------------+ - | | | - +------------+ x------+ A,A,1,w,labels:[foo] - | A,B,2,x,labels:[foo] - A,A,1,w | - A,B,2,x | - | | - QualifierRegex("B") | - | | - A,B,2,x | - | | - +--------------------------------+ - | - A,A,1,w,labels:[foo] - A,B,2,x,labels:[foo] // could be switched - A,B,2,x // could be switched - - Despite being excluded by the qualifier filter, a copy of - every cell that reaches the sink is present in the final - result. - - As with an - [Interleave][google.bigtable.v2.RowFilter.Interleave], - duplicate cells are possible, and appear in an unspecified - mutual order. In this case we have a duplicate with column - "A:B" and timestamp 2, because one copy passed through the - all filter while the other was passed through the label and - sink. Note that one copy has label "foo", while the other - does not. - - Cannot be used within the ``predicate_filter``, - ``true_filter``, or ``false_filter`` of a - [Condition][google.bigtable.v2.RowFilter.Condition]. - - This field is a member of `oneof`_ ``filter``. - pass_all_filter (bool): - Matches all cells, regardless of input. Functionally - equivalent to leaving ``filter`` unset, but included for - completeness. - - This field is a member of `oneof`_ ``filter``. - block_all_filter (bool): - Does not match any cells, regardless of - input. Useful for temporarily disabling just - part of a filter. - - This field is a member of `oneof`_ ``filter``. - row_key_regex_filter (bytes): - Matches only cells from rows whose keys satisfy the given - RE2 regex. 
In other words, passes through the entire row - when the key matches, and otherwise produces an empty row. - Note that, since row keys can contain arbitrary bytes, the - ``\C`` escape sequence must be used if a true wildcard is - desired. The ``.`` character will not match the new line - character ``\n``, which may be present in a binary key. - - This field is a member of `oneof`_ ``filter``. - row_sample_filter (float): - Matches all cells from a row with probability - p, and matches no cells from the row with - probability 1-p. - - This field is a member of `oneof`_ ``filter``. - family_name_regex_filter (str): - Matches only cells from columns whose families satisfy the - given RE2 regex. For technical reasons, the regex must not - contain the ``:`` character, even if it is not being used as - a literal. Note that, since column families cannot contain - the new line character ``\n``, it is sufficient to use ``.`` - as a full wildcard when matching column family names. - - This field is a member of `oneof`_ ``filter``. - column_qualifier_regex_filter (bytes): - Matches only cells from columns whose qualifiers satisfy the - given RE2 regex. Note that, since column qualifiers can - contain arbitrary bytes, the ``\C`` escape sequence must be - used if a true wildcard is desired. The ``.`` character will - not match the new line character ``\n``, which may be - present in a binary qualifier. - - This field is a member of `oneof`_ ``filter``. - column_range_filter (google.cloud.bigtable_v2.types.ColumnRange): - Matches only cells from columns within the - given range. - - This field is a member of `oneof`_ ``filter``. - timestamp_range_filter (google.cloud.bigtable_v2.types.TimestampRange): - Matches only cells with timestamps within the - given range. - - This field is a member of `oneof`_ ``filter``. - value_regex_filter (bytes): - Matches only cells with values that satisfy the given - regular expression. 
Note that, since cell values can contain - arbitrary bytes, the ``\C`` escape sequence must be used if - a true wildcard is desired. The ``.`` character will not - match the new line character ``\n``, which may be present in - a binary value. - - This field is a member of `oneof`_ ``filter``. - value_range_filter (google.cloud.bigtable_v2.types.ValueRange): - Matches only cells with values that fall - within the given range. - - This field is a member of `oneof`_ ``filter``. - cells_per_row_offset_filter (int): - Skips the first N cells of each row, matching - all subsequent cells. If duplicate cells are - present, as is possible when using an - Interleave, each copy of the cell is counted - separately. - - This field is a member of `oneof`_ ``filter``. - cells_per_row_limit_filter (int): - Matches only the first N cells of each row. - If duplicate cells are present, as is possible - when using an Interleave, each copy of the cell - is counted separately. - - This field is a member of `oneof`_ ``filter``. - cells_per_column_limit_filter (int): - Matches only the most recent N cells within each column. For - example, if N=2, this filter would match column ``foo:bar`` - at timestamps 10 and 9, skip all earlier cells in - ``foo:bar``, and then begin matching again in column - ``foo:bar2``. If duplicate cells are present, as is possible - when using an Interleave, each copy of the cell is counted - separately. - - This field is a member of `oneof`_ ``filter``. - strip_value_transformer (bool): - Replaces each cell's value with the empty - string. - - This field is a member of `oneof`_ ``filter``. - apply_label_transformer (str): - Applies the given label to all cells in the output row. This - allows the client to determine which results were produced - from which part of the filter. 
- - Values must be at most 15 characters in length, and match - the RE2 pattern ``[a-z0-9\\-]+`` - - Due to a technical limitation, it is not currently possible - to apply multiple labels to a cell. As a result, a Chain may - have no more than one sub-filter which contains a - ``apply_label_transformer``. It is okay for an Interleave to - contain multiple ``apply_label_transformers``, as they will - be applied to separate copies of the input. This may be - relaxed in the future. - - This field is a member of `oneof`_ ``filter``. - """ - - class Chain(proto.Message): - r"""A RowFilter which sends rows through several RowFilters in - sequence. - - Attributes: - filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): - The elements of "filters" are chained - together to process the input row: in row -> - f(0) -> intermediate row -> f(1) -> ... -> f(N) - -> out row The full chain is executed - atomically. - """ - - filters: MutableSequence['RowFilter'] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='RowFilter', - ) - - class Interleave(proto.Message): - r"""A RowFilter which sends each row to each of several component - RowFilters and interleaves the results. - - Attributes: - filters (MutableSequence[google.cloud.bigtable_v2.types.RowFilter]): - The elements of "filters" all process a copy of the input - row, and the results are pooled, sorted, and combined into a - single output row. If multiple cells are produced with the - same column and timestamp, they will all appear in the - output row in an unspecified mutual order. 
Consider the - following example, with three filters: - - :: - - input row - | - ----------------------------------------------------- - | | | - f(0) f(1) f(2) - | | | - 1: foo,bar,10,x foo,bar,10,z far,bar,7,a - 2: foo,blah,11,z far,blah,5,x far,blah,5,x - | | | - ----------------------------------------------------- - | - 1: foo,bar,10,z // could have switched with #2 - 2: foo,bar,10,x // could have switched with #1 - 3: foo,blah,11,z - 4: far,bar,7,a - 5: far,blah,5,x // identical to #6 - 6: far,blah,5,x // identical to #5 - - All interleaved filters are executed atomically. - """ - - filters: MutableSequence['RowFilter'] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='RowFilter', - ) - - class Condition(proto.Message): - r"""A RowFilter which evaluates one of two possible RowFilters, - depending on whether or not a predicate RowFilter outputs any - cells from the input row. - - IMPORTANT NOTE: The predicate filter does not execute atomically - with the true and false filters, which may lead to inconsistent - or unexpected results. Additionally, Condition filters have poor - performance, especially when filters are set for the false - condition. - - Attributes: - predicate_filter (google.cloud.bigtable_v2.types.RowFilter): - If ``predicate_filter`` outputs any cells, then - ``true_filter`` will be evaluated on the input row. - Otherwise, ``false_filter`` will be evaluated. - true_filter (google.cloud.bigtable_v2.types.RowFilter): - The filter to apply to the input row if ``predicate_filter`` - returns any results. If not provided, no results will be - returned in the true case. - false_filter (google.cloud.bigtable_v2.types.RowFilter): - The filter to apply to the input row if ``predicate_filter`` - does not return any results. If not provided, no results - will be returned in the false case. 
- """ - - predicate_filter: 'RowFilter' = proto.Field( - proto.MESSAGE, - number=1, - message='RowFilter', - ) - true_filter: 'RowFilter' = proto.Field( - proto.MESSAGE, - number=2, - message='RowFilter', - ) - false_filter: 'RowFilter' = proto.Field( - proto.MESSAGE, - number=3, - message='RowFilter', - ) - - chain: Chain = proto.Field( - proto.MESSAGE, - number=1, - oneof='filter', - message=Chain, - ) - interleave: Interleave = proto.Field( - proto.MESSAGE, - number=2, - oneof='filter', - message=Interleave, - ) - condition: Condition = proto.Field( - proto.MESSAGE, - number=3, - oneof='filter', - message=Condition, - ) - sink: bool = proto.Field( - proto.BOOL, - number=16, - oneof='filter', - ) - pass_all_filter: bool = proto.Field( - proto.BOOL, - number=17, - oneof='filter', - ) - block_all_filter: bool = proto.Field( - proto.BOOL, - number=18, - oneof='filter', - ) - row_key_regex_filter: bytes = proto.Field( - proto.BYTES, - number=4, - oneof='filter', - ) - row_sample_filter: float = proto.Field( - proto.DOUBLE, - number=14, - oneof='filter', - ) - family_name_regex_filter: str = proto.Field( - proto.STRING, - number=5, - oneof='filter', - ) - column_qualifier_regex_filter: bytes = proto.Field( - proto.BYTES, - number=6, - oneof='filter', - ) - column_range_filter: 'ColumnRange' = proto.Field( - proto.MESSAGE, - number=7, - oneof='filter', - message='ColumnRange', - ) - timestamp_range_filter: 'TimestampRange' = proto.Field( - proto.MESSAGE, - number=8, - oneof='filter', - message='TimestampRange', - ) - value_regex_filter: bytes = proto.Field( - proto.BYTES, - number=9, - oneof='filter', - ) - value_range_filter: 'ValueRange' = proto.Field( - proto.MESSAGE, - number=15, - oneof='filter', - message='ValueRange', - ) - cells_per_row_offset_filter: int = proto.Field( - proto.INT32, - number=10, - oneof='filter', - ) - cells_per_row_limit_filter: int = proto.Field( - proto.INT32, - number=11, - oneof='filter', - ) - cells_per_column_limit_filter: int = 
proto.Field( - proto.INT32, - number=12, - oneof='filter', - ) - strip_value_transformer: bool = proto.Field( - proto.BOOL, - number=13, - oneof='filter', - ) - apply_label_transformer: str = proto.Field( - proto.STRING, - number=19, - oneof='filter', - ) - - -class Mutation(proto.Message): - r"""Specifies a particular change to be made to the contents of a - row. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - set_cell (google.cloud.bigtable_v2.types.Mutation.SetCell): - Set a cell's value. - - This field is a member of `oneof`_ ``mutation``. - delete_from_column (google.cloud.bigtable_v2.types.Mutation.DeleteFromColumn): - Deletes cells from a column. - - This field is a member of `oneof`_ ``mutation``. - delete_from_family (google.cloud.bigtable_v2.types.Mutation.DeleteFromFamily): - Deletes cells from a column family. - - This field is a member of `oneof`_ ``mutation``. - delete_from_row (google.cloud.bigtable_v2.types.Mutation.DeleteFromRow): - Deletes cells from the entire row. - - This field is a member of `oneof`_ ``mutation``. - """ - - class SetCell(proto.Message): - r"""A Mutation which sets the value of the specified cell. - - Attributes: - family_name (str): - The name of the family into which new data should be - written. Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier (bytes): - The qualifier of the column into which new - data should be written. Can be any byte string, - including the empty string. - timestamp_micros (int): - The timestamp of the cell into which new data - should be written. Use -1 for current Bigtable - server time. 
Otherwise, the client should set - this value itself, noting that the default value - is a timestamp of zero if the field is left - unspecified. Values must match the granularity - of the table (e.g. micros, millis). - value (bytes): - The value to be written into the specified - cell. - """ - - family_name: str = proto.Field( - proto.STRING, - number=1, - ) - column_qualifier: bytes = proto.Field( - proto.BYTES, - number=2, - ) - timestamp_micros: int = proto.Field( - proto.INT64, - number=3, - ) - value: bytes = proto.Field( - proto.BYTES, - number=4, - ) - - class DeleteFromColumn(proto.Message): - r"""A Mutation which deletes cells from the specified column, - optionally restricting the deletions to a given timestamp range. - - Attributes: - family_name (str): - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier (bytes): - The qualifier of the column from which cells - should be deleted. Can be any byte string, - including the empty string. - time_range (google.cloud.bigtable_v2.types.TimestampRange): - The range of timestamps within which cells - should be deleted. - """ - - family_name: str = proto.Field( - proto.STRING, - number=1, - ) - column_qualifier: bytes = proto.Field( - proto.BYTES, - number=2, - ) - time_range: 'TimestampRange' = proto.Field( - proto.MESSAGE, - number=3, - message='TimestampRange', - ) - - class DeleteFromFamily(proto.Message): - r"""A Mutation which deletes all cells from the specified column - family. - - Attributes: - family_name (str): - The name of the family from which cells should be deleted. - Must match ``[-_.a-zA-Z0-9]+`` - """ - - family_name: str = proto.Field( - proto.STRING, - number=1, - ) - - class DeleteFromRow(proto.Message): - r"""A Mutation which deletes all cells from the containing row. 
- """ - - set_cell: SetCell = proto.Field( - proto.MESSAGE, - number=1, - oneof='mutation', - message=SetCell, - ) - delete_from_column: DeleteFromColumn = proto.Field( - proto.MESSAGE, - number=2, - oneof='mutation', - message=DeleteFromColumn, - ) - delete_from_family: DeleteFromFamily = proto.Field( - proto.MESSAGE, - number=3, - oneof='mutation', - message=DeleteFromFamily, - ) - delete_from_row: DeleteFromRow = proto.Field( - proto.MESSAGE, - number=4, - oneof='mutation', - message=DeleteFromRow, - ) - - -class ReadModifyWriteRule(proto.Message): - r"""Specifies an atomic read/modify/write operation on the latest - value of the specified column. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - family_name (str): - The name of the family to which the read/modify/write should - be applied. Must match ``[-_.a-zA-Z0-9]+`` - column_qualifier (bytes): - The qualifier of the column to which the - read/modify/write should be applied. - Can be any byte string, including the empty - string. - append_value (bytes): - Rule specifying that ``append_value`` be appended to the - existing value. If the targeted cell is unset, it will be - treated as containing the empty string. - - This field is a member of `oneof`_ ``rule``. - increment_amount (int): - Rule specifying that ``increment_amount`` be added to the - existing value. If the targeted cell is unset, it will be - treated as containing a zero. Otherwise, the targeted cell - must contain an 8-byte value (interpreted as a 64-bit - big-endian signed integer), or the entire request will fail. - - This field is a member of `oneof`_ ``rule``. 
- """ - - family_name: str = proto.Field( - proto.STRING, - number=1, - ) - column_qualifier: bytes = proto.Field( - proto.BYTES, - number=2, - ) - append_value: bytes = proto.Field( - proto.BYTES, - number=3, - oneof='rule', - ) - increment_amount: int = proto.Field( - proto.INT64, - number=4, - oneof='rule', - ) - - -class StreamPartition(proto.Message): - r"""NOTE: This API is intended to be used by Apache Beam - BigtableIO. A partition of a change stream. - - Attributes: - row_range (google.cloud.bigtable_v2.types.RowRange): - The row range covered by this partition and is specified by - [``start_key_closed``, ``end_key_open``). - """ - - row_range: 'RowRange' = proto.Field( - proto.MESSAGE, - number=1, - message='RowRange', - ) - - -class StreamContinuationTokens(proto.Message): - r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. The - information required to continue reading the data from multiple - ``StreamPartitions`` from where a previous read left off. - - Attributes: - tokens (MutableSequence[google.cloud.bigtable_v2.types.StreamContinuationToken]): - List of continuation tokens. - """ - - tokens: MutableSequence['StreamContinuationToken'] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='StreamContinuationToken', - ) - - -class StreamContinuationToken(proto.Message): - r"""NOTE: This API is intended to be used by Apache Beam BigtableIO. The - information required to continue reading the data from a - ``StreamPartition`` from where a previous read left off. - - Attributes: - partition (google.cloud.bigtable_v2.types.StreamPartition): - The partition that this token applies to. - token (str): - An encoded position in the stream to restart - reading from. 
- """ - - partition: 'StreamPartition' = proto.Field( - proto.MESSAGE, - number=1, - message='StreamPartition', - ) - token: str = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/feature_flags.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/feature_flags.py deleted file mode 100644 index e585b4a9c..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/feature_flags.py +++ /dev/null @@ -1,98 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.v2', - manifest={ - 'FeatureFlags', - }, -) - - -class FeatureFlags(proto.Message): - r"""Feature flags supported or enabled by a client. This is intended to - be sent as part of request metadata to assure the server that - certain behaviors are safe to enable. This proto is meant to be - serialized and websafe-base64 encoded under the - ``bigtable-features`` metadata key. The value will remain constant - for the lifetime of a client and due to HTTP2's HPACK compression, - the request overhead will be tiny. This is an internal - implementation detail and should not be used by end users directly. 
- - Attributes: - reverse_scans (bool): - Notify the server that the client supports - reverse scans. The server will reject - ReadRowsRequests with the reverse bit set when - this is absent. - mutate_rows_rate_limit (bool): - Notify the server that the client enables - batch write flow control by requesting - RateLimitInfo from MutateRowsResponse. Due to - technical reasons, this disables partial - retries. - mutate_rows_rate_limit2 (bool): - Notify the server that the client enables - batch write flow control by requesting - RateLimitInfo from MutateRowsResponse. With - partial retries enabled. - last_scanned_row_responses (bool): - Notify the server that the client supports the - last_scanned_row field in ReadRowsResponse for long-running - scans. - routing_cookie (bool): - Notify the server that the client supports - using encoded routing cookie strings to retry - requests with. - retry_info (bool): - Notify the server that the client supports - using retry info back off durations to retry - requests with. 
- """ - - reverse_scans: bool = proto.Field( - proto.BOOL, - number=1, - ) - mutate_rows_rate_limit: bool = proto.Field( - proto.BOOL, - number=3, - ) - mutate_rows_rate_limit2: bool = proto.Field( - proto.BOOL, - number=5, - ) - last_scanned_row_responses: bool = proto.Field( - proto.BOOL, - number=4, - ) - routing_cookie: bool = proto.Field( - proto.BOOL, - number=6, - ) - retry_info: bool = proto.Field( - proto.BOOL, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/request_stats.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/request_stats.py deleted file mode 100644 index 59ab626f5..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/request_stats.py +++ /dev/null @@ -1,171 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.v2', - manifest={ - 'ReadIterationStats', - 'RequestLatencyStats', - 'FullReadStatsView', - 'RequestStats', - }, -) - - -class ReadIterationStats(proto.Message): - r"""ReadIterationStats captures information about the iteration - of rows or cells over the course of a read, e.g. 
how many - results were scanned in a read operation versus the results - returned. - - Attributes: - rows_seen_count (int): - The rows seen (scanned) as part of the - request. This includes the count of rows - returned, as captured below. - rows_returned_count (int): - The rows returned as part of the request. - cells_seen_count (int): - The cells seen (scanned) as part of the - request. This includes the count of cells - returned, as captured below. - cells_returned_count (int): - The cells returned as part of the request. - """ - - rows_seen_count: int = proto.Field( - proto.INT64, - number=1, - ) - rows_returned_count: int = proto.Field( - proto.INT64, - number=2, - ) - cells_seen_count: int = proto.Field( - proto.INT64, - number=3, - ) - cells_returned_count: int = proto.Field( - proto.INT64, - number=4, - ) - - -class RequestLatencyStats(proto.Message): - r"""RequestLatencyStats provides a measurement of the latency of - the request as it interacts with different systems over its - lifetime, e.g. how long the request took to execute within a - frontend server. - - Attributes: - frontend_server_latency (google.protobuf.duration_pb2.Duration): - The latency measured by the frontend server - handling this request, from when the request was - received, to when this value is sent back in the - response. For more context on the component that - is measuring this latency, see: - https://cloud.google.com/bigtable/docs/overview - - Note: This value may be slightly shorter than - the value reported into aggregate latency - metrics in Monitoring for this request - (https://cloud.google.com/bigtable/docs/monitoring-instance) - as this value needs to be sent in the response - before the latency measurement including that - transmission is finalized. - - Note: This value includes the end-to-end latency - of contacting nodes in the targeted cluster, - e.g. 
measuring from when the first byte arrives - at the frontend server, to when this value is - sent back as the last value in the response, - including any latency incurred by contacting - nodes, waiting for results from nodes, and - finally sending results from nodes back to the - caller. - """ - - frontend_server_latency: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - - -class FullReadStatsView(proto.Message): - r"""FullReadStatsView captures all known information about a - read. - - Attributes: - read_iteration_stats (google.cloud.bigtable_v2.types.ReadIterationStats): - Iteration stats describe how efficient the - read is, e.g. comparing rows seen vs. rows - returned or cells seen vs cells returned can - provide an indication of read efficiency (the - higher the ratio of seen to retuned the better). - request_latency_stats (google.cloud.bigtable_v2.types.RequestLatencyStats): - Request latency stats describe the time taken - to complete a request, from the server side. - """ - - read_iteration_stats: 'ReadIterationStats' = proto.Field( - proto.MESSAGE, - number=1, - message='ReadIterationStats', - ) - request_latency_stats: 'RequestLatencyStats' = proto.Field( - proto.MESSAGE, - number=2, - message='RequestLatencyStats', - ) - - -class RequestStats(proto.Message): - r"""RequestStats is the container for additional information pertaining - to a single request, helpful for evaluating the performance of the - sent request. Currently, there are the following supported methods: - - - google.bigtable.v2.ReadRows - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - full_read_stats_view (google.cloud.bigtable_v2.types.FullReadStatsView): - Available with the - ReadRowsRequest.RequestStatsView.REQUEST_STATS_FULL view, - see package google.bigtable.v2. - - This field is a member of `oneof`_ ``stats_view``. 
- """ - - full_read_stats_view: 'FullReadStatsView' = proto.Field( - proto.MESSAGE, - number=1, - oneof='stats_view', - message='FullReadStatsView', - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/response_params.py b/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/response_params.py deleted file mode 100644 index 7f1e4d8fa..000000000 --- a/owl-bot-staging/bigtable/v2/google/cloud/bigtable_v2/types/response_params.py +++ /dev/null @@ -1,64 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.v2', - manifest={ - 'ResponseParams', - }, -) - - -class ResponseParams(proto.Message): - r"""Response metadata proto This is an experimental feature that will be - used to get zone_id and cluster_id from response trailers to tag the - metrics. This should not be used by customers directly - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - zone_id (str): - The cloud bigtable zone associated with the - cluster. - - This field is a member of `oneof`_ ``_zone_id``. - cluster_id (str): - Identifier for a cluster that represents set - of bigtable resources. 
- - This field is a member of `oneof`_ ``_cluster_id``. - """ - - zone_id: str = proto.Field( - proto.STRING, - number=1, - optional=True, - ) - cluster_id: str = proto.Field( - proto.STRING, - number=2, - optional=True, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable/v2/mypy.ini b/owl-bot-staging/bigtable/v2/mypy.ini deleted file mode 100644 index 574c5aed3..000000000 --- a/owl-bot-staging/bigtable/v2/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.7 -namespace_packages = True diff --git a/owl-bot-staging/bigtable/v2/noxfile.py b/owl-bot-staging/bigtable/v2/noxfile.py deleted file mode 100644 index 75496b6c8..000000000 --- a/owl-bot-staging/bigtable/v2/noxfile.py +++ /dev/null @@ -1,177 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -ALL_PYTHON = [ - "3.7", - "3.8", - "3.9", - "3.10", - "3.11", - "3.12" -] - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = 'google-cloud-bigtable' - -BLACK_VERSION = "black==22.3.0" -BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"] -DEFAULT_PYTHON_VERSION = "3.12" - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", - "blacken", - "lint", -] - -@nox.session(python=ALL_PYTHON) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/bigtable_v2/', - '--cov=tests/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=ALL_PYTHON) -def mypy(session): - """Run the type checker.""" - session.install( - 'mypy', - 'types-requests', - 'types-protobuf' - ) - session.install('.') - session.run( - 'mypy', - '-p', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx==7.0.1", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def lint(session): - """Run linters. - - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. 
- """ - session.install("flake8", BLACK_VERSION) - session.run( - "black", - "--check", - *BLACK_PATHS, - ) - session.run("flake8", "google", "tests", "samples") - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def blacken(session): - """Run black. Format code to uniform standard.""" - session.install(BLACK_VERSION) - session.run( - "black", - *BLACK_PATHS, - ) diff --git a/owl-bot-staging/bigtable/v2/scripts/fixup_bigtable_v2_keywords.py b/owl-bot-staging/bigtable/v2/scripts/fixup_bigtable_v2_keywords.py deleted file mode 100644 index 8d32e5b70..000000000 --- a/owl-bot-staging/bigtable/v2/scripts/fixup_bigtable_v2_keywords.py +++ /dev/null @@ -1,184 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class bigtableCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_and_mutate_row': ('table_name', 'row_key', 'app_profile_id', 'predicate_filter', 'true_mutations', 'false_mutations', ), - 'generate_initial_change_stream_partitions': ('table_name', 'app_profile_id', ), - 'mutate_row': ('table_name', 'row_key', 'mutations', 'app_profile_id', ), - 'mutate_rows': ('table_name', 'entries', 'app_profile_id', ), - 'ping_and_warm': ('name', 'app_profile_id', ), - 'read_change_stream': ('table_name', 'app_profile_id', 'partition', 'start_time', 'continuation_tokens', 'end_time', 'heartbeat_duration', ), - 'read_modify_write_row': ('table_name', 'row_key', 'rules', 'app_profile_id', ), - 'read_rows': ('table_name', 'app_profile_id', 'rows', 'filter', 'rows_limit', 'request_stats_view', 'reversed', ), - 'sample_row_keys': ('table_name', 'app_profile_id', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. 
- args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. - return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=bigtableCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. 
- with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the bigtable client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. - -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/bigtable/v2/setup.py b/owl-bot-staging/bigtable/v2/setup.py deleted file mode 100644 index addfe4d5b..000000000 --- a/owl-bot-staging/bigtable/v2/setup.py +++ /dev/null @@ -1,90 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under 
the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import re - -import setuptools # type: ignore - -package_root = os.path.abspath(os.path.dirname(__file__)) - -name = 'google-cloud-bigtable' - - -description = "Google Cloud Bigtable API client library" - -version = None - -with open(os.path.join(package_root, 'google/cloud/bigtable/gapic_version.py')) as fp: - version_candidates = re.findall(r"(?<=\")\d+.\d+.\d+(?=\")", fp.read()) - assert (len(version_candidates) == 1) - version = version_candidates[0] - -if version[0] == "0": - release_status = "Development Status :: 4 - Beta" -else: - release_status = "Development Status :: 5 - Production/Stable" - -dependencies = [ - "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", - "proto-plus >= 1.22.3, <2.0.0dev", - "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", -] -url = "https://github.com/googleapis/google-cloud-python/tree/main/packages/google-cloud-bigtable" - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, "README.rst") -with io.open(readme_filename, encoding="utf-8") as readme_file: - readme = readme_file.read() - -packages = [ - package - for package in setuptools.find_namespace_packages() - if package.startswith("google") -] - -setuptools.setup( - name=name, - version=version, - description=description, - 
long_description=readme, - author="Google LLC", - author_email="googleapis-packages@google.com", - license="Apache 2.0", - url=url, - classifiers=[ - release_status, - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Operating System :: OS Independent", - "Topic :: Internet", - ], - platforms="Posix; MacOS X; Windows", - packages=packages, - python_requires=">=3.7", - install_requires=dependencies, - include_package_data=True, - zip_safe=False, -) diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.10.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.10.txt deleted file mode 100644 index ed7f9aed2..000000000 --- a/owl-bot-staging/bigtable/v2/testing/constraints-3.10.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.11.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.11.txt deleted file mode 100644 index ed7f9aed2..000000000 --- a/owl-bot-staging/bigtable/v2/testing/constraints-3.11.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. 
-google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.12.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.12.txt deleted file mode 100644 index ed7f9aed2..000000000 --- a/owl-bot-staging/bigtable/v2/testing/constraints-3.12.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.7.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.7.txt deleted file mode 100644 index 185f7d366..000000000 --- a/owl-bot-staging/bigtable/v2/testing/constraints-3.7.txt +++ /dev/null @@ -1,9 +0,0 @@ -# This constraints file is used to check that lower bounds -# are correct in setup.py -# List all library dependencies and extras in this file. -# Pin the version to the lower bound. -# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", -# Then this file should have google-cloud-foo==1.14.0 -google-api-core==1.34.0 -proto-plus==1.22.3 -protobuf==3.19.5 diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.8.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.8.txt deleted file mode 100644 index ed7f9aed2..000000000 --- a/owl-bot-staging/bigtable/v2/testing/constraints-3.8.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/bigtable/v2/testing/constraints-3.9.txt b/owl-bot-staging/bigtable/v2/testing/constraints-3.9.txt deleted file mode 100644 index ed7f9aed2..000000000 --- a/owl-bot-staging/bigtable/v2/testing/constraints-3.9.txt +++ /dev/null @@ -1,6 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. 
-google-api-core -proto-plus -protobuf diff --git a/owl-bot-staging/bigtable/v2/tests/__init__.py b/owl-bot-staging/bigtable/v2/tests/__init__.py deleted file mode 100644 index 1b4db446e..000000000 --- a/owl-bot-staging/bigtable/v2/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/bigtable/v2/tests/unit/__init__.py b/owl-bot-staging/bigtable/v2/tests/unit/__init__.py deleted file mode 100644 index 1b4db446e..000000000 --- a/owl-bot-staging/bigtable/v2/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/bigtable/v2/tests/unit/gapic/__init__.py b/owl-bot-staging/bigtable/v2/tests/unit/gapic/__init__.py deleted file mode 100644 index 1b4db446e..000000000 --- a/owl-bot-staging/bigtable/v2/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/__init__.py b/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/__init__.py deleted file mode 100644 index 1b4db446e..000000000 --- a/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# diff --git a/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/test_bigtable.py b/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/test_bigtable.py deleted file mode 100644 index e0c100281..000000000 --- a/owl-bot-staging/bigtable/v2/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ /dev/null @@ -1,5860 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # pragma: NO COVER -except ImportError: # pragma: NO COVER - import mock - -import grpc -from grpc.experimental import aio -from collections.abc import Iterable -from google.protobuf import json_format -import json -import math -import pytest -from google.api_core import api_core_version -from proto.marshal.rules.dates import DurationRule, TimestampRule -from proto.marshal.rules import wrappers -from requests import Response -from requests import Request, PreparedRequest -from requests.sessions import Session -from google.protobuf import json_format - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions 
import MutualTLSChannelError -from google.cloud.bigtable_v2.services.bigtable import BigtableAsyncClient -from google.cloud.bigtable_v2.services.bigtable import BigtableClient -from google.cloud.bigtable_v2.services.bigtable import transports -from google.cloud.bigtable_v2.types import bigtable -from google.cloud.bigtable_v2.types import data -from google.cloud.bigtable_v2.types import request_stats -from google.oauth2 import service_account -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - -# If default endpoint template is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint template so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint_template(client): - return "test.{UNIVERSE_DOMAIN}" if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) else client._DEFAULT_ENDPOINT_TEMPLATE - -# Anonymous Credentials with universe domain property. If no universe domain is provided, then -# the default universe domain is "googleapis.com". 
-class _AnonymousCredentialsWithUniverseDomain(ga_credentials.AnonymousCredentials): - def __init__(self, universe_domain="googleapis.com"): - super(_AnonymousCredentialsWithUniverseDomain, self).__init__() - self._universe_domain = universe_domain - - @property - def universe_domain(self): - return self._universe_domain - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert BigtableClient._get_default_mtls_endpoint(None) is None - assert BigtableClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert BigtableClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert BigtableClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert BigtableClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert BigtableClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - -def test__read_environment_variables(): - assert BigtableClient._read_environment_variables() == (False, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - assert BigtableClient._read_environment_variables() == (True, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - assert BigtableClient._read_environment_variables() == (False, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError) as excinfo: - BigtableClient._read_environment_variables() - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - assert 
BigtableClient._read_environment_variables() == (False, "never", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - assert BigtableClient._read_environment_variables() == (False, "always", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): - assert BigtableClient._read_environment_variables() == (False, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - BigtableClient._read_environment_variables() - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - - with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): - assert BigtableClient._read_environment_variables() == (False, "auto", "foo.com") - -def test__get_client_cert_source(): - mock_provided_cert_source = mock.Mock() - mock_default_cert_source = mock.Mock() - - assert BigtableClient._get_client_cert_source(None, False) is None - assert BigtableClient._get_client_cert_source(mock_provided_cert_source, False) is None - assert BigtableClient._get_client_cert_source(mock_provided_cert_source, True) == mock_provided_cert_source - - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_default_cert_source): - assert BigtableClient._get_client_cert_source(None, True) is mock_default_cert_source - assert BigtableClient._get_client_cert_source(mock_provided_cert_source, "true") is mock_provided_cert_source - -@mock.patch.object(BigtableClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableClient)) -@mock.patch.object(BigtableAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableAsyncClient)) -def test__get_api_endpoint(): - api_override = "foo.com" - 
mock_client_cert_source = mock.Mock() - default_universe = BigtableClient._DEFAULT_UNIVERSE - default_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=default_universe) - mock_universe = "bar.com" - mock_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=mock_universe) - - assert BigtableClient._get_api_endpoint(api_override, mock_client_cert_source, default_universe, "always") == api_override - assert BigtableClient._get_api_endpoint(None, mock_client_cert_source, default_universe, "auto") == BigtableClient.DEFAULT_MTLS_ENDPOINT - assert BigtableClient._get_api_endpoint(None, None, default_universe, "auto") == default_endpoint - assert BigtableClient._get_api_endpoint(None, None, default_universe, "always") == BigtableClient.DEFAULT_MTLS_ENDPOINT - assert BigtableClient._get_api_endpoint(None, mock_client_cert_source, default_universe, "always") == BigtableClient.DEFAULT_MTLS_ENDPOINT - assert BigtableClient._get_api_endpoint(None, None, mock_universe, "never") == mock_endpoint - assert BigtableClient._get_api_endpoint(None, None, default_universe, "never") == default_endpoint - - with pytest.raises(MutualTLSChannelError) as excinfo: - BigtableClient._get_api_endpoint(None, mock_client_cert_source, mock_universe, "auto") - assert str(excinfo.value) == "mTLS is not supported in any universe other than googleapis.com." - -def test__get_universe_domain(): - client_universe_domain = "foo.com" - universe_domain_env = "bar.com" - - assert BigtableClient._get_universe_domain(client_universe_domain, universe_domain_env) == client_universe_domain - assert BigtableClient._get_universe_domain(None, universe_domain_env) == universe_domain_env - assert BigtableClient._get_universe_domain(None, None) == BigtableClient._DEFAULT_UNIVERSE - - with pytest.raises(ValueError) as excinfo: - BigtableClient._get_universe_domain("", None) - assert str(excinfo.value) == "Universe Domain cannot be an empty string." 
- -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (BigtableClient, transports.BigtableGrpcTransport, "grpc"), - (BigtableClient, transports.BigtableRestTransport, "rest"), -]) -def test__validate_universe_domain(client_class, transport_class, transport_name): - client = client_class( - transport=transport_class( - credentials=_AnonymousCredentialsWithUniverseDomain() - ) - ) - assert client._validate_universe_domain() == True - - # Test the case when universe is already validated. - assert client._validate_universe_domain() == True - - if transport_name == "grpc": - # Test the case where credentials are provided by the - # `local_channel_credentials`. The default universes in both match. - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - client = client_class(transport=transport_class(channel=channel)) - assert client._validate_universe_domain() == True - - # Test the case where credentials do not exist: e.g. a transport is provided - # with no credentials. Validation should still succeed because there is no - # mismatch with non-existent credentials. - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - transport=transport_class(channel=channel) - transport._credentials = None - client = client_class(transport=transport) - assert client._validate_universe_domain() == True - - # Test the case when there is a universe mismatch from the credentials. - client = client_class( - transport=transport_class(credentials=_AnonymousCredentialsWithUniverseDomain(universe_domain="foo.com")) - ) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert str(excinfo.value) == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
- - # Test the case when there is a universe mismatch from the client. - # - # TODO: Make this test unconditional once the minimum supported version of - # google-api-core becomes 2.15.0 or higher. - api_core_major, api_core_minor, _ = [int(part) for part in api_core_version.__version__.split(".")] - if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): - client = client_class(client_options={"universe_domain": "bar.com"}, transport=transport_class(credentials=_AnonymousCredentialsWithUniverseDomain(),)) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert str(excinfo.value) == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - -@pytest.mark.parametrize("client_class,transport_name", [ - (BigtableClient, "grpc"), - (BigtableAsyncClient, "grpc_asyncio"), - (BigtableClient, "rest"), -]) -def test_bigtable_client_from_service_account_info(client_class, transport_name): - creds = _AnonymousCredentialsWithUniverseDomain() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info, transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'bigtable.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://bigtable.googleapis.com' - ) - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.BigtableGrpcTransport, "grpc"), - (transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.BigtableRestTransport, "rest"), -]) -def test_bigtable_client_service_account_always_use_jwt(transport_class, transport_name): - with 
mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class,transport_name", [ - (BigtableClient, "grpc"), - (BigtableAsyncClient, "grpc_asyncio"), - (BigtableClient, "rest"), -]) -def test_bigtable_client_from_service_account_file(client_class, transport_name): - creds = _AnonymousCredentialsWithUniverseDomain() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'bigtable.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://bigtable.googleapis.com' - ) - - -def test_bigtable_client_get_transport_class(): - transport = BigtableClient.get_transport_class() - available_transports = [ - transports.BigtableGrpcTransport, - transports.BigtableRestTransport, - ] - assert transport in available_transports - - transport = BigtableClient.get_transport_class("grpc") - assert transport == transports.BigtableGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (BigtableClient, 
transports.BigtableGrpcTransport, "grpc"), - (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), - (BigtableClient, transports.BigtableRestTransport, "rest"), -]) -@mock.patch.object(BigtableClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableClient)) -@mock.patch.object(BigtableAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableAsyncClient)) -def test_bigtable_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(BigtableClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=_AnonymousCredentialsWithUniverseDomain() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(BigtableClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client = client_class(transport=transport_name) - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError) as excinfo: - client = client_class(transport=transport_name) - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - # Check the case api_endpoint is provided - options = client_options.ClientOptions(api_audience="https://language.googleapis.com") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience="https://language.googleapis.com" - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (BigtableClient, transports.BigtableGrpcTransport, "grpc", "true"), - (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (BigtableClient, transports.BigtableGrpcTransport, "grpc", "false"), - (BigtableAsyncClient, 
transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio", "false"), - (BigtableClient, transports.BigtableRestTransport, "rest", "true"), - (BigtableClient, transports.BigtableRestTransport, "rest", "false"), -]) -@mock.patch.object(BigtableClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableClient)) -@mock.patch.object(BigtableAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_bigtable_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE) - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case ADC client cert is provided. 
Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE) - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class", [ - BigtableClient, BigtableAsyncClient -]) -@mock.patch.object(BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient)) -@mock.patch.object(BigtableAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableAsyncClient)) -def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class): - mock_client_cert_source = mock.Mock() - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source == mock_client_cert_source - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - mock_client_cert_source = mock.Mock() - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source == mock_client_cert_source - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - -@pytest.mark.parametrize("client_class", [ - BigtableClient, BigtableAsyncClient -]) -@mock.patch.object(BigtableClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableClient)) -@mock.patch.object(BigtableAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableAsyncClient)) -def test_bigtable_client_client_api_endpoint(client_class): - mock_client_cert_source = client_cert_source_callback - api_override = "foo.com" - default_universe = BigtableClient._DEFAULT_UNIVERSE - default_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=default_universe) - mock_universe = "bar.com" - mock_endpoint = 
BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=mock_universe) - - # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", - # use ClientOptions.api_endpoint as the api endpoint regardless. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"): - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=api_override) - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == api_override - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == default_endpoint - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", - # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - - # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), - # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, - # and ClientOptions.universe_domain="bar.com", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
- options = client_options.ClientOptions() - universe_exists = hasattr(options, "universe_domain") - if universe_exists: - options = client_options.ClientOptions(universe_domain=mock_universe) - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - else: - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == (mock_endpoint if universe_exists else default_endpoint) - assert client.universe_domain == (mock_universe if universe_exists else default_universe) - - # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - options = client_options.ClientOptions() - if hasattr(options, "universe_domain"): - delattr(options, "universe_domain") - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == default_endpoint - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (BigtableClient, transports.BigtableGrpcTransport, "grpc"), - (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio"), - (BigtableClient, transports.BigtableRestTransport, "rest"), -]) -def test_bigtable_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (BigtableClient, transports.BigtableGrpcTransport, "grpc", grpc_helpers), - (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), - (BigtableClient, transports.BigtableRestTransport, "rest", None), -]) -def test_bigtable_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -def test_bigtable_client_client_options_from_dict(): - with mock.patch('google.cloud.bigtable_v2.services.bigtable.transports.BigtableGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = BigtableClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (BigtableClient, transports.BigtableGrpcTransport, "grpc", grpc_helpers), - (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), -]) -def test_bigtable_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # test that the credentials from file are saved and used as the credentials. - with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel" - ) as create_channel: - creds = _AnonymousCredentialsWithUniverseDomain() - file_creds = _AnonymousCredentialsWithUniverseDomain() - load_creds.return_value = (file_creds, None) - adc.return_value = (creds, None) - client = client_class(client_options=options, transport=transport_name) - create_channel.assert_called_with( - "bigtable.googleapis.com:443", - credentials=file_creds, - credentials_file=None, - quota_project_id=None, - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.data', - 'https://www.googleapis.com/auth/bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-bigtable.data', - 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - scopes=None, - default_host="bigtable.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.ReadRowsRequest, - 
dict, -]) -def test_read_rows(request_type, transport: str = 'grpc'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_rows), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.ReadRowsResponse()]) - response = client.read_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, bigtable.ReadRowsResponse) - - -def test_read_rows_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_rows), - '__call__') as call: - client.read_rows() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() - -@pytest.mark.asyncio -async def test_read_rows_async(transport: str = 'grpc_asyncio', request_type=bigtable.ReadRowsRequest): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_rows), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[bigtable.ReadRowsResponse()]) - response = await client.read_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadRowsRequest() - - # Establish that the response is the type that we expect. - message = await response.read() - assert isinstance(message, bigtable.ReadRowsResponse) - - -@pytest.mark.asyncio -async def test_read_rows_async_from_dict(): - await test_read_rows_async(request_type=dict) - -def test_read_rows_routing_parameters(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadRowsRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_rows), - '__call__') as call: - call.return_value = iter([bigtable.ReadRowsResponse()]) - client.read_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable.ReadRowsRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_rows), - '__call__') as call: - call.return_value = iter([bigtable.ReadRowsResponse()]) - client.read_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - - -def test_read_rows_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_rows), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.ReadRowsResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.read_rows( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - - -def test_read_rows_flattened_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.read_rows( - bigtable.ReadRowsRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - -@pytest.mark.asyncio -async def test_read_rows_flattened_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_rows), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.ReadRowsResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.read_rows( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_read_rows_flattened_error_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.read_rows( - bigtable.ReadRowsRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.SampleRowKeysRequest, - dict, -]) -def test_sample_row_keys(request_type, transport: str = 'grpc'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.sample_row_keys), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - response = client.sample_row_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, bigtable.SampleRowKeysResponse) - - -def test_sample_row_keys_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.sample_row_keys), - '__call__') as call: - client.sample_row_keys() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() - -@pytest.mark.asyncio -async def test_sample_row_keys_async(transport: str = 'grpc_asyncio', request_type=bigtable.SampleRowKeysRequest): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.sample_row_keys), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[bigtable.SampleRowKeysResponse()]) - response = await client.sample_row_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.SampleRowKeysRequest() - - # Establish that the response is the type that we expect. - message = await response.read() - assert isinstance(message, bigtable.SampleRowKeysResponse) - - -@pytest.mark.asyncio -async def test_sample_row_keys_async_from_dict(): - await test_sample_row_keys_async(request_type=dict) - -def test_sample_row_keys_routing_parameters(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"}) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.sample_row_keys), - '__call__') as call: - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - client.sample_row_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.SampleRowKeysRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.sample_row_keys), - '__call__') as call: - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - client.sample_row_keys(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - - -def test_sample_row_keys_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.sample_row_keys), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.sample_row_keys( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - - -def test_sample_row_keys_flattened_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.sample_row_keys( - bigtable.SampleRowKeysRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - -@pytest.mark.asyncio -async def test_sample_row_keys_flattened_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.sample_row_keys), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.SampleRowKeysResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.sample_row_keys( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_sample_row_keys_flattened_error_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.sample_row_keys( - bigtable.SampleRowKeysRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.MutateRowRequest, - dict, -]) -def test_mutate_row(request_type, transport: str = 'grpc'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.MutateRowResponse( - ) - response = client.mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.MutateRowResponse) - - -def test_mutate_row_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_row), - '__call__') as call: - client.mutate_row() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() - -@pytest.mark.asyncio -async def test_mutate_row_async(transport: str = 'grpc_asyncio', request_type=bigtable.MutateRowRequest): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable.MutateRowResponse( - )) - response = await client.mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.MutateRowResponse) - - -@pytest.mark.asyncio -async def test_mutate_row_async_from_dict(): - await test_mutate_row_async(request_type=dict) - -def test_mutate_row_routing_parameters(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable.MutateRowRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_row), - '__call__') as call: - call.return_value = bigtable.MutateRowResponse() - client.mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_row), - '__call__') as call: - call.return_value = bigtable.MutateRowResponse() - client.mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - - -def test_mutate_row_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.MutateRowResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.mutate_row( - table_name='table_name_value', - row_key=b'row_key_blob', - mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].row_key - mock_val = b'row_key_blob' - assert arg == mock_val - arg = args[0].mutations - mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - - -def test_mutate_row_flattened_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.mutate_row( - bigtable.MutateRowRequest(), - table_name='table_name_value', - row_key=b'row_key_blob', - mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - -@pytest.mark.asyncio -async def test_mutate_row_flattened_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.MutateRowResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.MutateRowResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.mutate_row( - table_name='table_name_value', - row_key=b'row_key_blob', - mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].row_key - mock_val = b'row_key_blob' - assert arg == mock_val - arg = args[0].mutations - mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_mutate_row_flattened_error_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.mutate_row( - bigtable.MutateRowRequest(), - table_name='table_name_value', - row_key=b'row_key_blob', - mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.MutateRowsRequest, - dict, -]) -def test_mutate_rows(request_type, transport: str = 'grpc'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.mutate_rows), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.MutateRowsResponse()]) - response = client.mutate_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, bigtable.MutateRowsResponse) - - -def test_mutate_rows_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_rows), - '__call__') as call: - client.mutate_rows() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() - -@pytest.mark.asyncio -async def test_mutate_rows_async(transport: str = 'grpc_asyncio', request_type=bigtable.MutateRowsRequest): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_rows), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[bigtable.MutateRowsResponse()]) - response = await client.mutate_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.MutateRowsRequest() - - # Establish that the response is the type that we expect. - message = await response.read() - assert isinstance(message, bigtable.MutateRowsResponse) - - -@pytest.mark.asyncio -async def test_mutate_rows_async_from_dict(): - await test_mutate_rows_async(request_type=dict) - -def test_mutate_rows_routing_parameters(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_rows), - '__call__') as call: - call.return_value = iter([bigtable.MutateRowsResponse()]) - client.mutate_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.MutateRowsRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.mutate_rows), - '__call__') as call: - call.return_value = iter([bigtable.MutateRowsResponse()]) - client.mutate_rows(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - - -def test_mutate_rows_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_rows), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.MutateRowsResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.mutate_rows( - table_name='table_name_value', - entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].entries - mock_val = [bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')] - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - - -def test_mutate_rows_flattened_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.mutate_rows( - bigtable.MutateRowsRequest(), - table_name='table_name_value', - entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], - app_profile_id='app_profile_id_value', - ) - -@pytest.mark.asyncio -async def test_mutate_rows_flattened_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.mutate_rows), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.MutateRowsResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.mutate_rows( - table_name='table_name_value', - entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].entries - mock_val = [bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')] - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_mutate_rows_flattened_error_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.mutate_rows( - bigtable.MutateRowsRequest(), - table_name='table_name_value', - entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], - app_profile_id='app_profile_id_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.CheckAndMutateRowRequest, - dict, -]) -def test_check_and_mutate_row(request_type, transport: str = 'grpc'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.CheckAndMutateRowResponse( - predicate_matched=True, - ) - response = client.check_and_mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.CheckAndMutateRowResponse) - assert response.predicate_matched is True - - -def test_check_and_mutate_row_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.check_and_mutate_row), - '__call__') as call: - client.check_and_mutate_row() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() - -@pytest.mark.asyncio -async def test_check_and_mutate_row_async(transport: str = 'grpc_asyncio', request_type=bigtable.CheckAndMutateRowRequest): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable.CheckAndMutateRowResponse( - predicate_matched=True, - )) - response = await client.check_and_mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.CheckAndMutateRowRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.CheckAndMutateRowResponse) - assert response.predicate_matched is True - - -@pytest.mark.asyncio -async def test_check_and_mutate_row_async_from_dict(): - await test_check_and_mutate_row_async(request_type=dict) - -def test_check_and_mutate_row_routing_parameters(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable.CheckAndMutateRowRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), - '__call__') as call: - call.return_value = bigtable.CheckAndMutateRowResponse() - client.check_and_mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.CheckAndMutateRowRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), - '__call__') as call: - call.return_value = bigtable.CheckAndMutateRowResponse() - client.check_and_mutate_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - - -def test_check_and_mutate_row_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.CheckAndMutateRowResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.check_and_mutate_row( - table_name='table_name_value', - row_key=b'row_key_blob', - predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), - true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].row_key - mock_val = b'row_key_blob' - assert arg == mock_val - arg = args[0].predicate_filter - mock_val = data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])) - assert arg == mock_val - arg = args[0].true_mutations - mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] - assert arg == mock_val - arg = args[0].false_mutations - mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - - -def test_check_and_mutate_row_flattened_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.check_and_mutate_row( - bigtable.CheckAndMutateRowRequest(), - table_name='table_name_value', - row_key=b'row_key_blob', - predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), - true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - -@pytest.mark.asyncio -async def test_check_and_mutate_row_flattened_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_and_mutate_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.CheckAndMutateRowResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.CheckAndMutateRowResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.check_and_mutate_row( - table_name='table_name_value', - row_key=b'row_key_blob', - predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), - true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].row_key - mock_val = b'row_key_blob' - assert arg == mock_val - arg = args[0].predicate_filter - mock_val = data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])) - assert arg == mock_val - arg = args[0].true_mutations - mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] - assert arg == mock_val - arg = args[0].false_mutations - mock_val = [data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))] - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_check_and_mutate_row_flattened_error_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.check_and_mutate_row( - bigtable.CheckAndMutateRowRequest(), - table_name='table_name_value', - row_key=b'row_key_blob', - predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), - true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.PingAndWarmRequest, - dict, -]) -def test_ping_and_warm(request_type, transport: str = 'grpc'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.ping_and_warm), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.PingAndWarmResponse( - ) - response = client.ping_and_warm(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.PingAndWarmResponse) - - -def test_ping_and_warm_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.ping_and_warm), - '__call__') as call: - client.ping_and_warm() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() - -@pytest.mark.asyncio -async def test_ping_and_warm_async(transport: str = 'grpc_asyncio', request_type=bigtable.PingAndWarmRequest): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.ping_and_warm), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable.PingAndWarmResponse( - )) - response = await client.ping_and_warm(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.PingAndWarmRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.PingAndWarmResponse) - - -@pytest.mark.asyncio -async def test_ping_and_warm_async_from_dict(): - await test_ping_and_warm_async(request_type=dict) - -def test_ping_and_warm_routing_parameters(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.PingAndWarmRequest(**{"name": "projects/sample1/instances/sample2"}) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.ping_and_warm), - '__call__') as call: - call.return_value = bigtable.PingAndWarmResponse() - client.ping_and_warm(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.PingAndWarmRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.ping_and_warm), - '__call__') as call: - call.return_value = bigtable.PingAndWarmResponse() - client.ping_and_warm(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - - -def test_ping_and_warm_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.ping_and_warm), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.PingAndWarmResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.ping_and_warm( - name='name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - - -def test_ping_and_warm_flattened_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.ping_and_warm( - bigtable.PingAndWarmRequest(), - name='name_value', - app_profile_id='app_profile_id_value', - ) - -@pytest.mark.asyncio -async def test_ping_and_warm_flattened_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.ping_and_warm), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.PingAndWarmResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.PingAndWarmResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.ping_and_warm( - name='name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_ping_and_warm_flattened_error_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.ping_and_warm( - bigtable.PingAndWarmRequest(), - name='name_value', - app_profile_id='app_profile_id_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.ReadModifyWriteRowRequest, - dict, -]) -def test_read_modify_write_row(request_type, transport: str = 'grpc'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.ReadModifyWriteRowResponse( - ) - response = client.read_modify_write_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.ReadModifyWriteRowResponse) - - -def test_read_modify_write_row_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_modify_write_row), - '__call__') as call: - client.read_modify_write_row() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() - -@pytest.mark.asyncio -async def test_read_modify_write_row_async(transport: str = 'grpc_asyncio', request_type=bigtable.ReadModifyWriteRowRequest): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable.ReadModifyWriteRowResponse( - )) - response = await client.read_modify_write_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadModifyWriteRowRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.ReadModifyWriteRowResponse) - - -@pytest.mark.asyncio -async def test_read_modify_write_row_async_from_dict(): - await test_read_modify_write_row_async(request_type=dict) - -def test_read_modify_write_row_routing_parameters(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest(**{"table_name": "projects/sample1/instances/sample2/tables/sample3"}) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_modify_write_row), - '__call__') as call: - call.return_value = bigtable.ReadModifyWriteRowResponse() - client.read_modify_write_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadModifyWriteRowRequest(**{"app_profile_id": "sample1"}) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), - '__call__') as call: - call.return_value = bigtable.ReadModifyWriteRowResponse() - client.read_modify_write_row(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - _, _, kw = call.mock_calls[0] - # This test doesn't assert anything useful. - assert kw['metadata'] - - -def test_read_modify_write_row_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.ReadModifyWriteRowResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.read_modify_write_row( - table_name='table_name_value', - row_key=b'row_key_blob', - rules=[data.ReadModifyWriteRule(family_name='family_name_value')], - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].row_key - mock_val = b'row_key_blob' - assert arg == mock_val - arg = args[0].rules - mock_val = [data.ReadModifyWriteRule(family_name='family_name_value')] - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - - -def test_read_modify_write_row_flattened_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_modify_write_row( - bigtable.ReadModifyWriteRowRequest(), - table_name='table_name_value', - row_key=b'row_key_blob', - rules=[data.ReadModifyWriteRule(family_name='family_name_value')], - app_profile_id='app_profile_id_value', - ) - -@pytest.mark.asyncio -async def test_read_modify_write_row_flattened_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_modify_write_row), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable.ReadModifyWriteRowResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable.ReadModifyWriteRowResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.read_modify_write_row( - table_name='table_name_value', - row_key=b'row_key_blob', - rules=[data.ReadModifyWriteRule(family_name='family_name_value')], - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].row_key - mock_val = b'row_key_blob' - assert arg == mock_val - arg = args[0].rules - mock_val = [data.ReadModifyWriteRule(family_name='family_name_value')] - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_read_modify_write_row_flattened_error_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.read_modify_write_row( - bigtable.ReadModifyWriteRowRequest(), - table_name='table_name_value', - row_key=b'row_key_blob', - rules=[data.ReadModifyWriteRule(family_name='family_name_value')], - app_profile_id='app_profile_id_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.GenerateInitialChangeStreamPartitionsRequest, - dict, -]) -def test_generate_initial_change_stream_partitions(request_type, transport: str = 'grpc'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.generate_initial_change_stream_partitions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.GenerateInitialChangeStreamPartitionsResponse()]) - response = client.generate_initial_change_stream_partitions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() - - # Establish that the response is the type that we expect. - for message in response: - assert isinstance(message, bigtable.GenerateInitialChangeStreamPartitionsResponse) - - -def test_generate_initial_change_stream_partitions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_initial_change_stream_partitions), - '__call__') as call: - client.generate_initial_change_stream_partitions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() - -@pytest.mark.asyncio -async def test_generate_initial_change_stream_partitions_async(transport: str = 'grpc_asyncio', request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.generate_initial_change_stream_partitions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()]) - response = await client.generate_initial_change_stream_partitions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.GenerateInitialChangeStreamPartitionsRequest() - - # Establish that the response is the type that we expect. - message = await response.read() - assert isinstance(message, bigtable.GenerateInitialChangeStreamPartitionsResponse) - - -@pytest.mark.asyncio -async def test_generate_initial_change_stream_partitions_async_from_dict(): - await test_generate_initial_change_stream_partitions_async(request_type=dict) - - -def test_generate_initial_change_stream_partitions_field_headers(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.GenerateInitialChangeStreamPartitionsRequest() - - request.table_name = 'table_name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_initial_change_stream_partitions), - '__call__') as call: - call.return_value = iter([bigtable.GenerateInitialChangeStreamPartitionsResponse()]) - client.generate_initial_change_stream_partitions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'table_name=table_name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_generate_initial_change_stream_partitions_field_headers_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.GenerateInitialChangeStreamPartitionsRequest() - - request.table_name = 'table_name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_initial_change_stream_partitions), - '__call__') as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[bigtable.GenerateInitialChangeStreamPartitionsResponse()]) - await client.generate_initial_change_stream_partitions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'table_name=table_name_value', - ) in kw['metadata'] - - -def test_generate_initial_change_stream_partitions_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_initial_change_stream_partitions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.GenerateInitialChangeStreamPartitionsResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.generate_initial_change_stream_partitions( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - - -def test_generate_initial_change_stream_partitions_flattened_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.generate_initial_change_stream_partitions( - bigtable.GenerateInitialChangeStreamPartitionsRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - -@pytest.mark.asyncio -async def test_generate_initial_change_stream_partitions_flattened_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_initial_change_stream_partitions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.GenerateInitialChangeStreamPartitionsResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.generate_initial_change_stream_partitions( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_generate_initial_change_stream_partitions_flattened_error_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.generate_initial_change_stream_partitions( - bigtable.GenerateInitialChangeStreamPartitionsRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.ReadChangeStreamRequest, - dict, -]) -def test_read_change_stream(request_type, transport: str = 'grpc'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_change_stream), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.ReadChangeStreamResponse()]) - response = client.read_change_stream(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() - - # Establish that the response is the type that we expect. 
- for message in response: - assert isinstance(message, bigtable.ReadChangeStreamResponse) - - -def test_read_change_stream_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_change_stream), - '__call__') as call: - client.read_change_stream() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() - -@pytest.mark.asyncio -async def test_read_change_stream_async(transport: str = 'grpc_asyncio', request_type=bigtable.ReadChangeStreamRequest): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_change_stream), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[bigtable.ReadChangeStreamResponse()]) - response = await client.read_change_stream(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable.ReadChangeStreamRequest() - - # Establish that the response is the type that we expect. 
- message = await response.read() - assert isinstance(message, bigtable.ReadChangeStreamResponse) - - -@pytest.mark.asyncio -async def test_read_change_stream_async_from_dict(): - await test_read_change_stream_async(request_type=dict) - - -def test_read_change_stream_field_headers(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadChangeStreamRequest() - - request.table_name = 'table_name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_change_stream), - '__call__') as call: - call.return_value = iter([bigtable.ReadChangeStreamResponse()]) - client.read_change_stream(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'table_name=table_name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_read_change_stream_field_headers_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable.ReadChangeStreamRequest() - - request.table_name = 'table_name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.read_change_stream), - '__call__') as call: - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - call.return_value.read = mock.AsyncMock(side_effect=[bigtable.ReadChangeStreamResponse()]) - await client.read_change_stream(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'table_name=table_name_value', - ) in kw['metadata'] - - -def test_read_change_stream_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_change_stream), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.ReadChangeStreamResponse()]) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.read_change_stream( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - - -def test_read_change_stream_flattened_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.read_change_stream( - bigtable.ReadChangeStreamRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - -@pytest.mark.asyncio -async def test_read_change_stream_flattened_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.read_change_stream), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iter([bigtable.ReadChangeStreamResponse()]) - - call.return_value = mock.Mock(aio.UnaryStreamCall, autospec=True) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.read_change_stream( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table_name - mock_val = 'table_name_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_read_change_stream_flattened_error_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.read_change_stream( - bigtable.ReadChangeStreamRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.ReadRowsRequest, - dict, -]) -def test_read_rows_rest(request_type): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadRowsResponse( - last_scanned_row_key=b'last_scanned_row_key_blob', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_rows(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.ReadRowsResponse) - assert response.last_scanned_row_key == b'last_scanned_row_key_blob' - - -def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).read_rows._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = 'table_name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).read_rows._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == 'table_name_value' - - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadRowsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_rows(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_read_rows_rest_unset_required_fields(): - transport = transports.BigtableRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.read_rows._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_rows_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableRestInterceptor, "post_read_rows") as post, \ - 
mock.patch.object(transports.BigtableRestInterceptor, "pre_read_rows") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ReadRowsRequest.pb(bigtable.ReadRowsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadRowsResponse.to_json(bigtable.ReadRowsResponse()) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.ReadRowsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ReadRowsResponse() - - client.read_rows(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_read_rows_rest_bad_request(transport: str = 'rest', request_type=bigtable.ReadRowsRequest): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_rows(request) - - -def test_read_rows_rest_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadRowsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - client.read_rows(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:readRows" % client.transport._host, args[1]) - - -def test_read_rows_rest_flattened_error(transport: str = 'rest'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.read_rows( - bigtable.ReadRowsRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - -def test_read_rows_rest_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.SampleRowKeysRequest, - dict, -]) -def test_sample_row_keys_rest(request_type): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse( - row_key=b'row_key_blob', - offset_bytes=1293, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.sample_row_keys(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.SampleRowKeysResponse) - assert response.row_key == b'row_key_blob' - assert response.offset_bytes == 1293 - - -def test_sample_row_keys_rest_required_fields(request_type=bigtable.SampleRowKeysRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).sample_row_keys._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = 'table_name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).sample_row_keys._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("app_profile_id", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == 'table_name_value' - - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.sample_row_keys(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_sample_row_keys_rest_unset_required_fields(): - transport = transports.BigtableRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.sample_row_keys._get_unset_required_fields({}) - assert set(unset_fields) == (set(("appProfileId", )) & set(("tableName", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_sample_row_keys_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = 
BigtableClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableRestInterceptor, "post_sample_row_keys") as post, \ - mock.patch.object(transports.BigtableRestInterceptor, "pre_sample_row_keys") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.SampleRowKeysRequest.pb(bigtable.SampleRowKeysRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.SampleRowKeysResponse.to_json(bigtable.SampleRowKeysResponse()) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.SampleRowKeysRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.SampleRowKeysResponse() - - client.sample_row_keys(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_sample_row_keys_rest_bad_request(transport: str = 'rest', request_type=bigtable.SampleRowKeysRequest): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.sample_row_keys(request) - - -def test_sample_row_keys_rest_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.SampleRowKeysResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.SampleRowKeysResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - client.sample_row_keys(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys" % client.transport._host, args[1]) - - -def test_sample_row_keys_rest_flattened_error(transport: str = 'rest'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.sample_row_keys( - bigtable.SampleRowKeysRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - -def test_sample_row_keys_rest_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.MutateRowRequest, - dict, -]) -def test_mutate_row_rest(request_type): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowResponse( - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.mutate_row(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.MutateRowResponse) - - -def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request_init["row_key"] = b'' - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).mutate_row._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = 'table_name_value' - jsonified_request["rowKey"] = b'row_key_blob' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).mutate_row._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == 'table_name_value' - assert "rowKey" in jsonified_request - assert jsonified_request["rowKey"] == b'row_key_blob' - - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.mutate_row(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_mutate_row_rest_unset_required_fields(): - transport = transports.BigtableRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.mutate_row._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName", "rowKey", "mutations", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_mutate_row_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableRestInterceptor, "post_mutate_row") as post, \ - mock.patch.object(transports.BigtableRestInterceptor, "pre_mutate_row") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = 
bigtable.MutateRowRequest.pb(bigtable.MutateRowRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.MutateRowResponse.to_json(bigtable.MutateRowResponse()) - - request = bigtable.MutateRowRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.MutateRowResponse() - - client.mutate_row(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_mutate_row_rest_bad_request(transport: str = 'rest', request_type=bigtable.MutateRowRequest): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.mutate_row(request) - - -def test_mutate_row_rest_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable.MutateRowResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - table_name='table_name_value', - row_key=b'row_key_blob', - mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.MutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.mutate_row(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow" % client.transport._host, args[1]) - - -def test_mutate_row_rest_flattened_error(transport: str = 'rest'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.mutate_row( - bigtable.MutateRowRequest(), - table_name='table_name_value', - row_key=b'row_key_blob', - mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - - -def test_mutate_row_rest_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.MutateRowsRequest, - dict, -]) -def test_mutate_rows_rest(request_type): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowsResponse( - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.mutate_rows(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.MutateRowsResponse) - - -def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).mutate_rows._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = 'table_name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).mutate_rows._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == 'table_name_value' - - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.MutateRowsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.mutate_rows(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_mutate_rows_rest_unset_required_fields(): - transport = transports.BigtableRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.mutate_rows._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName", "entries", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_mutate_rows_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableRestInterceptor, "post_mutate_rows") as post, \ - mock.patch.object(transports.BigtableRestInterceptor, "pre_mutate_rows") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = 
bigtable.MutateRowsRequest.pb(bigtable.MutateRowsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.MutateRowsResponse.to_json(bigtable.MutateRowsResponse()) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.MutateRowsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.MutateRowsResponse() - - client.mutate_rows(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_mutate_rows_rest_bad_request(transport: str = 'rest', request_type=bigtable.MutateRowsRequest): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.mutate_rows(request) - - -def test_mutate_rows_rest_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable.MutateRowsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - table_name='table_name_value', - entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], - app_profile_id='app_profile_id_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.MutateRowsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - client.mutate_rows(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows" % client.transport._host, args[1]) - - -def test_mutate_rows_rest_flattened_error(transport: str = 'rest'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.mutate_rows( - bigtable.MutateRowsRequest(), - table_name='table_name_value', - entries=[bigtable.MutateRowsRequest.Entry(row_key=b'row_key_blob')], - app_profile_id='app_profile_id_value', - ) - - -def test_mutate_rows_rest_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.CheckAndMutateRowRequest, - dict, -]) -def test_check_and_mutate_row_rest(request_type): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.CheckAndMutateRowResponse( - predicate_matched=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.check_and_mutate_row(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.CheckAndMutateRowResponse) - assert response.predicate_matched is True - - -def test_check_and_mutate_row_rest_required_fields(request_type=bigtable.CheckAndMutateRowRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request_init["row_key"] = b'' - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).check_and_mutate_row._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = 'table_name_value' - jsonified_request["rowKey"] = b'row_key_blob' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).check_and_mutate_row._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == 'table_name_value' - assert "rowKey" in jsonified_request - assert jsonified_request["rowKey"] == b'row_key_blob' - - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.CheckAndMutateRowResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.check_and_mutate_row(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_check_and_mutate_row_rest_unset_required_fields(): - transport = transports.BigtableRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.check_and_mutate_row._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName", "rowKey", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_check_and_mutate_row_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, 
"transcode") as transcode, \ - mock.patch.object(transports.BigtableRestInterceptor, "post_check_and_mutate_row") as post, \ - mock.patch.object(transports.BigtableRestInterceptor, "pre_check_and_mutate_row") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.CheckAndMutateRowRequest.pb(bigtable.CheckAndMutateRowRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.CheckAndMutateRowResponse.to_json(bigtable.CheckAndMutateRowResponse()) - - request = bigtable.CheckAndMutateRowRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.CheckAndMutateRowResponse() - - client.check_and_mutate_row(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_check_and_mutate_row_rest_bad_request(transport: str = 'rest', request_type=bigtable.CheckAndMutateRowRequest): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.check_and_mutate_row(request) - - -def test_check_and_mutate_row_rest_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.CheckAndMutateRowResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - table_name='table_name_value', - row_key=b'row_key_blob', - predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), - true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.CheckAndMutateRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.check_and_mutate_row(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow" % client.transport._host, args[1]) - - -def test_check_and_mutate_row_rest_flattened_error(transport: str = 'rest'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.check_and_mutate_row( - bigtable.CheckAndMutateRowRequest(), - table_name='table_name_value', - row_key=b'row_key_blob', - predicate_filter=data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=data.RowFilter.Chain(filters=[data.RowFilter(chain=None)]))])), - true_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - false_mutations=[data.Mutation(set_cell=data.Mutation.SetCell(family_name='family_name_value'))], - app_profile_id='app_profile_id_value', - ) - - -def test_check_and_mutate_row_rest_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.PingAndWarmRequest, - dict, -]) -def test_ping_and_warm_rest(request_type): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable.PingAndWarmResponse( - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.ping_and_warm(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.PingAndWarmResponse) - - -def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).ping_and_warm._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).ping_and_warm._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = bigtable.PingAndWarmResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.ping_and_warm(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_ping_and_warm_rest_unset_required_fields(): - transport = transports.BigtableRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.ping_and_warm._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_ping_and_warm_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with 
mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableRestInterceptor, "post_ping_and_warm") as post, \ - mock.patch.object(transports.BigtableRestInterceptor, "pre_ping_and_warm") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.PingAndWarmRequest.pb(bigtable.PingAndWarmRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.PingAndWarmResponse.to_json(bigtable.PingAndWarmResponse()) - - request = bigtable.PingAndWarmRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.PingAndWarmResponse() - - client.ping_and_warm(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_ping_and_warm_rest_bad_request(transport: str = 'rest', request_type=bigtable.PingAndWarmRequest): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.ping_and_warm(request) - - -def test_ping_and_warm_rest_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.PingAndWarmResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - app_profile_id='app_profile_id_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.PingAndWarmResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.ping_and_warm(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*}:ping" % client.transport._host, args[1]) - - -def test_ping_and_warm_rest_flattened_error(transport: str = 'rest'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.ping_and_warm( - bigtable.PingAndWarmRequest(), - name='name_value', - app_profile_id='app_profile_id_value', - ) - - -def test_ping_and_warm_rest_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.ReadModifyWriteRowRequest, - dict, -]) -def test_read_modify_write_row_rest(request_type): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadModifyWriteRowResponse( - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.read_modify_write_row(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable.ReadModifyWriteRowResponse) - - -def test_read_modify_write_row_rest_required_fields(request_type=bigtable.ReadModifyWriteRowRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request_init["row_key"] = b'' - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).read_modify_write_row._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = 'table_name_value' - jsonified_request["rowKey"] = b'row_key_blob' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).read_modify_write_row._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == 'table_name_value' - assert "rowKey" in jsonified_request - assert jsonified_request["rowKey"] == b'row_key_blob' - - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadModifyWriteRowResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.read_modify_write_row(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_read_modify_write_row_rest_unset_required_fields(): - transport = transports.BigtableRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.read_modify_write_row._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName", "rowKey", "rules", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_modify_write_row_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - 
mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableRestInterceptor, "post_read_modify_write_row") as post, \ - mock.patch.object(transports.BigtableRestInterceptor, "pre_read_modify_write_row") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ReadModifyWriteRowRequest.pb(bigtable.ReadModifyWriteRowRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadModifyWriteRowResponse.to_json(bigtable.ReadModifyWriteRowResponse()) - - request = bigtable.ReadModifyWriteRowRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ReadModifyWriteRowResponse() - - client.read_modify_write_row(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_read_modify_write_row_rest_bad_request(transport: str = 'rest', request_type=bigtable.ReadModifyWriteRowRequest): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_modify_write_row(request) - - -def test_read_modify_write_row_rest_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadModifyWriteRowResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - table_name='table_name_value', - row_key=b'row_key_blob', - rules=[data.ReadModifyWriteRule(family_name='family_name_value')], - app_profile_id='app_profile_id_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadModifyWriteRowResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.read_modify_write_row(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow" % client.transport._host, args[1]) - - -def test_read_modify_write_row_rest_flattened_error(transport: str = 'rest'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_modify_write_row( - bigtable.ReadModifyWriteRowRequest(), - table_name='table_name_value', - row_key=b'row_key_blob', - rules=[data.ReadModifyWriteRule(family_name='family_name_value')], - app_profile_id='app_profile_id_value', - ) - - -def test_read_modify_write_row_rest_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.GenerateInitialChangeStreamPartitionsRequest, - dict, -]) -def test_generate_initial_change_stream_partitions_rest(request_type): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse( - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.generate_initial_change_stream_partitions(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.GenerateInitialChangeStreamPartitionsResponse) - - -def test_generate_initial_change_stream_partitions_rest_required_fields(request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).generate_initial_change_stream_partitions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = 'table_name_value' - - unset_fields = 
transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).generate_initial_change_stream_partitions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert jsonified_request["tableName"] == 'table_name_value' - - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.generate_initial_change_stream_partitions(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_generate_initial_change_stream_partitions_rest_unset_required_fields(): - transport = transports.BigtableRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.generate_initial_change_stream_partitions._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableRestInterceptor, "post_generate_initial_change_stream_partitions") as 
post, \ - mock.patch.object(transports.BigtableRestInterceptor, "pre_generate_initial_change_stream_partitions") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.GenerateInitialChangeStreamPartitionsRequest.pb(bigtable.GenerateInitialChangeStreamPartitionsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.GenerateInitialChangeStreamPartitionsResponse.to_json(bigtable.GenerateInitialChangeStreamPartitionsResponse()) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.GenerateInitialChangeStreamPartitionsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() - - client.generate_initial_change_stream_partitions(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_generate_initial_change_stream_partitions_rest_bad_request(transport: str = 'rest', request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.generate_initial_change_stream_partitions(request) - - -def test_generate_initial_change_stream_partitions_rest_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.GenerateInitialChangeStreamPartitionsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - client.generate_initial_change_stream_partitions(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions" % client.transport._host, args[1]) - - -def test_generate_initial_change_stream_partitions_rest_flattened_error(transport: str = 'rest'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.generate_initial_change_stream_partitions( - bigtable.GenerateInitialChangeStreamPartitionsRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - -def test_generate_initial_change_stream_partitions_rest_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable.ReadChangeStreamRequest, - dict, -]) -def test_read_change_stream_rest(request_type): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable.ReadChangeStreamResponse( - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_change_stream(request) - - assert isinstance(response, Iterable) - response = next(response) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable.ReadChangeStreamResponse) - - -def test_read_change_stream_rest_required_fields(request_type=bigtable.ReadChangeStreamRequest): - transport_class = transports.BigtableRestTransport - - request_init = {} - request_init["table_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).read_change_stream._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["tableName"] = 'table_name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).read_change_stream._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "tableName" in jsonified_request - assert 
jsonified_request["tableName"] == 'table_name_value' - - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - response = client.read_change_stream(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_read_change_stream_rest_unset_required_fields(): - transport = transports.BigtableRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = 
transport.read_change_stream._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("tableName", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_read_change_stream_rest_interceptors(null_interceptor): - transport = transports.BigtableRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), - ) - client = BigtableClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableRestInterceptor, "post_read_change_stream") as post, \ - mock.patch.object(transports.BigtableRestInterceptor, "pre_read_change_stream") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable.ReadChangeStreamRequest.pb(bigtable.ReadChangeStreamRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable.ReadChangeStreamResponse.to_json(bigtable.ReadChangeStreamResponse()) - req.return_value._content = "[{}]".format(req.return_value._content) - - request = bigtable.ReadChangeStreamRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable.ReadChangeStreamResponse() - - client.read_change_stream(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_read_change_stream_rest_bad_request(transport: str = 'rest', request_type=bigtable.ReadChangeStreamRequest): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that 
will satisfy transcoding - request_init = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.read_change_stream(request) - - -def test_read_change_stream_rest_flattened(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable.ReadChangeStreamResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'table_name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable.ReadChangeStreamResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - json_return_value = "[{}]".format(json_return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - with mock.patch.object(response_value, 'iter_content') as iter_content: - iter_content.return_value = iter(json_return_value) - client.read_change_stream(**mock_args) - - # Establish that the underlying call was made with the expected - # request object 
values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream" % client.transport._host, args[1]) - - -def test_read_change_stream_rest_flattened_error(transport: str = 'rest'): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.read_change_stream( - bigtable.ReadChangeStreamRequest(), - table_name='table_name_value', - app_profile_id='app_profile_id_value', - ) - - -def test_read_change_stream_rest_error(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - with pytest.raises(ValueError): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - with pytest.raises(ValueError): - client = BigtableClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableClient( - client_options=options, - transport=transport, - ) - - # It is an error to provide an api_key and a credential. 
- options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableClient( - client_options=options, - credentials=_AnonymousCredentialsWithUniverseDomain() - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - with pytest.raises(ValueError): - client = BigtableClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - client = BigtableClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.BigtableGrpcAsyncIOTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.BigtableGrpcTransport, - transports.BigtableGrpcAsyncIOTransport, - transports.BigtableRestTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. 
- with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport_class() - adc.assert_called_once() - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "rest", -]) -def test_transport_kind(transport_name): - transport = BigtableClient.get_transport_class(transport_name)( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - assert transport.kind == transport_name - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - assert isinstance( - client.transport, - transports.BigtableGrpcTransport, - ) - -def test_bigtable_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.BigtableTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - credentials_file="credentials.json" - ) - - -def test_bigtable_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.BigtableTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'read_rows', - 'sample_row_keys', - 'mutate_row', - 'mutate_rows', - 'check_and_mutate_row', - 'ping_and_warm', - 'read_modify_write_row', - 'generate_initial_change_stream_partitions', - 'read_change_stream', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Catch all for all remaining methods and properties - remainder = [ - 'kind', - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() - - -def test_bigtable_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport = transports.BigtableTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.data', - 'https://www.googleapis.com/auth/bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-bigtable.data', - 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - quota_project_id="octopus", - ) - - -def test_bigtable_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport = transports.BigtableTransport() - adc.assert_called_once() - - -def test_bigtable_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - BigtableClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.data', - 'https://www.googleapis.com/auth/bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-bigtable.data', - 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableGrpcTransport, - transports.BigtableGrpcAsyncIOTransport, - ], -) -def test_bigtable_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/bigtable.data', 'https://www.googleapis.com/auth/bigtable.data.readonly', 'https://www.googleapis.com/auth/cloud-bigtable.data', 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/cloud-platform.read-only',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableGrpcTransport, - transports.BigtableGrpcAsyncIOTransport, - transports.BigtableRestTransport, - ], -) -def test_bigtable_transport_auth_gdch_credentials(transport_class): - host = 'https://language.com' - api_audience_tests = [None, 'https://language2.com'] - api_audience_expect = [host, 'https://language2.com'] - for t, e in zip(api_audience_tests, api_audience_expect): - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - gdch_mock = mock.MagicMock() - type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) - adc.return_value = (gdch_mock, None) - transport_class(host=host, api_audience=t) - gdch_mock.with_gdch_audience.assert_called_once_with( - e - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableGrpcTransport, grpc_helpers), - (transports.BigtableGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_bigtable_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = _AnonymousCredentialsWithUniverseDomain() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "bigtable.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.data', - 'https://www.googleapis.com/auth/bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-bigtable.data', - 'https://www.googleapis.com/auth/cloud-bigtable.data.readonly', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - scopes=["1", "2"], - default_host="bigtable.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport]) -def test_bigtable_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = _AnonymousCredentialsWithUniverseDomain() - - # Check ssl_channel_credentials is used if provided. - with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. 
- with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - -def test_bigtable_http_transport_client_cert_source_for_mtls(): - cred = _AnonymousCredentialsWithUniverseDomain() - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: - transports.BigtableRestTransport ( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) - - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_bigtable_host_no_port(transport_name): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_options=client_options.ClientOptions(api_endpoint='bigtable.googleapis.com'), - transport=transport_name, - ) - assert client.transport._host == ( - 'bigtable.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://bigtable.googleapis.com' - ) - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_bigtable_host_with_port(transport_name): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_options=client_options.ClientOptions(api_endpoint='bigtable.googleapis.com:8000'), - transport=transport_name, - ) - assert client.transport._host == ( - 'bigtable.googleapis.com:8000' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://bigtable.googleapis.com:8000' - ) - -@pytest.mark.parametrize("transport_name", [ - "rest", -]) -def 
test_bigtable_client_transport_session_collision(transport_name): - creds1 = _AnonymousCredentialsWithUniverseDomain() - creds2 = _AnonymousCredentialsWithUniverseDomain() - client1 = BigtableClient( - credentials=creds1, - transport=transport_name, - ) - client2 = BigtableClient( - credentials=creds2, - transport=transport_name, - ) - session1 = client1.transport.read_rows._session - session2 = client2.transport.read_rows._session - assert session1 != session2 - session1 = client1.transport.sample_row_keys._session - session2 = client2.transport.sample_row_keys._session - assert session1 != session2 - session1 = client1.transport.mutate_row._session - session2 = client2.transport.mutate_row._session - assert session1 != session2 - session1 = client1.transport.mutate_rows._session - session2 = client2.transport.mutate_rows._session - assert session1 != session2 - session1 = client1.transport.check_and_mutate_row._session - session2 = client2.transport.check_and_mutate_row._session - assert session1 != session2 - session1 = client1.transport.ping_and_warm._session - session2 = client2.transport.ping_and_warm._session - assert session1 != session2 - session1 = client1.transport.read_modify_write_row._session - session2 = client2.transport.read_modify_write_row._session - assert session1 != session2 - session1 = client1.transport.generate_initial_change_stream_partitions._session - session2 = client2.transport.generate_initial_change_stream_partitions._session - assert session1 != session2 - session1 = client1.transport.read_change_stream._session - session2 = client2.transport.read_change_stream._session - assert session1 != session2 -def test_bigtable_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.BigtableGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_bigtable_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.BigtableGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport]) -def test_bigtable_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = _AnonymousCredentialsWithUniverseDomain() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - 
credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport]) -def test_bigtable_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_instance_path(): - project = "squid" - instance = "clam" - expected = "projects/{project}/instances/{instance}".format(project=project, instance=instance, ) - actual = BigtableClient.instance_path(project, instance) - assert expected == actual - - -def test_parse_instance_path(): - expected = { - "project": "whelk", - "instance": "octopus", - } 
- path = BigtableClient.instance_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableClient.parse_instance_path(path) - assert expected == actual - -def test_table_path(): - project = "oyster" - instance = "nudibranch" - table = "cuttlefish" - expected = "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) - actual = BigtableClient.table_path(project, instance, table) - assert expected == actual - - -def test_parse_table_path(): - expected = { - "project": "mussel", - "instance": "winkle", - "table": "nautilus", - } - path = BigtableClient.table_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableClient.parse_table_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "scallop" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = BigtableClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "abalone", - } - path = BigtableClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "squid" - expected = "folders/{folder}".format(folder=folder, ) - actual = BigtableClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "clam", - } - path = BigtableClient.common_folder_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "whelk" - expected = "organizations/{organization}".format(organization=organization, ) - actual = BigtableClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "octopus", - } - path = BigtableClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "oyster" - expected = "projects/{project}".format(project=project, ) - actual = BigtableClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nudibranch", - } - path = BigtableClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "cuttlefish" - location = "mussel" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = BigtableClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "winkle", - "location": "nautilus", - } - path = BigtableClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_with_default_client_info(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.BigtableTransport, '_prep_wrapped_messages') as prep: - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.BigtableTransport, '_prep_wrapped_messages') as prep: - transport_class = BigtableClient.get_transport_class() - transport = transport_class( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = BigtableAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - - -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'rest', - 'grpc', - ] - for transport in transports: - client = BigtableClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() - -@pytest.mark.parametrize("client_class,transport_class", [ - (BigtableClient, transports.BigtableGrpcTransport), - (BigtableAsyncClient, transports.BigtableGrpcAsyncIOTransport), -]) -def test_api_key_credentials(client_class, transport_class): - with mock.patch.object( - google.auth._default, "get_api_key_credentials", create=True - ) as get_api_key_credentials: - mock_cred = mock.Mock() - get_api_key_credentials.return_value = mock_cred - options = client_options.ClientOptions() - options.api_key = "api_key" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=mock_cred, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) diff --git a/owl-bot-staging/bigtable_admin/v2/.coveragerc b/owl-bot-staging/bigtable_admin/v2/.coveragerc deleted file mode 100644 index 2fb79c4ed..000000000 --- a/owl-bot-staging/bigtable_admin/v2/.coveragerc +++ /dev/null @@ -1,13 +0,0 @@ -[run] -branch = True - -[report] -show_missing = True -omit = - google/cloud/bigtable_admin/__init__.py - google/cloud/bigtable_admin/gapic_version.py -exclude_lines = - # Re-enable the standard pragma - pragma: NO COVER - # Ignore debug-only repr - def __repr__ diff --git a/owl-bot-staging/bigtable_admin/v2/.flake8 b/owl-bot-staging/bigtable_admin/v2/.flake8 deleted file mode 100644 index 29227d4cf..000000000 --- a/owl-bot-staging/bigtable_admin/v2/.flake8 +++ /dev/null @@ -1,33 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Copyright 2020 Google LLC -# -# Licensed under the Apache License, 
Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# Generated by synthtool. DO NOT EDIT! -[flake8] -ignore = E203, E266, E501, W503 -exclude = - # Exclude generated code. - **/proto/** - **/gapic/** - **/services/** - **/types/** - *_pb2.py - - # Standard linting exemptions. - **/.nox/** - __pycache__, - .git, - *.pyc, - conf.py diff --git a/owl-bot-staging/bigtable_admin/v2/MANIFEST.in b/owl-bot-staging/bigtable_admin/v2/MANIFEST.in deleted file mode 100644 index d9b71fd61..000000000 --- a/owl-bot-staging/bigtable_admin/v2/MANIFEST.in +++ /dev/null @@ -1,2 +0,0 @@ -recursive-include google/cloud/bigtable_admin *.py -recursive-include google/cloud/bigtable_admin_v2 *.py diff --git a/owl-bot-staging/bigtable_admin/v2/README.rst b/owl-bot-staging/bigtable_admin/v2/README.rst deleted file mode 100644 index 93f147497..000000000 --- a/owl-bot-staging/bigtable_admin/v2/README.rst +++ /dev/null @@ -1,49 +0,0 @@ -Python Client for Google Cloud Bigtable Admin API -================================================= - -Quick Start ------------ - -In order to use this library, you first need to go through the following steps: - -1. `Select or create a Cloud Platform project.`_ -2. `Enable billing for your project.`_ -3. Enable the Google Cloud Bigtable Admin API. -4. `Setup Authentication.`_ - -.. _Select or create a Cloud Platform project.: https://console.cloud.google.com/project -.. _Enable billing for your project.: https://cloud.google.com/billing/docs/how-to/modify-project#enable_billing_for_a_project -.. 
_Setup Authentication.: https://googleapis.dev/python/google-api-core/latest/auth.html - -Installation -~~~~~~~~~~~~ - -Install this library in a `virtualenv`_ using pip. `virtualenv`_ is a tool to -create isolated Python environments. The basic problem it addresses is one of -dependencies and versions, and indirectly permissions. - -With `virtualenv`_, it's possible to install this library without needing system -install permissions, and without clashing with the installed system -dependencies. - -.. _`virtualenv`: https://virtualenv.pypa.io/en/latest/ - - -Mac/Linux -^^^^^^^^^ - -.. code-block:: console - - python3 -m venv - source /bin/activate - /bin/pip install /path/to/library - - -Windows -^^^^^^^ - -.. code-block:: console - - python3 -m venv - \Scripts\activate - \Scripts\pip.exe install \path\to\library diff --git a/owl-bot-staging/bigtable_admin/v2/docs/_static/custom.css b/owl-bot-staging/bigtable_admin/v2/docs/_static/custom.css deleted file mode 100644 index 06423be0b..000000000 --- a/owl-bot-staging/bigtable_admin/v2/docs/_static/custom.css +++ /dev/null @@ -1,3 +0,0 @@ -dl.field-list > dt { - min-width: 100px -} diff --git a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_instance_admin.rst b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_instance_admin.rst deleted file mode 100644 index 42f7caad7..000000000 --- a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_instance_admin.rst +++ /dev/null @@ -1,10 +0,0 @@ -BigtableInstanceAdmin ---------------------------------------- - -.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin - :members: - :inherited-members: - -.. 
automodule:: google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_table_admin.rst b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_table_admin.rst deleted file mode 100644 index e10ff3ac6..000000000 --- a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/bigtable_table_admin.rst +++ /dev/null @@ -1,10 +0,0 @@ -BigtableTableAdmin ------------------------------------- - -.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_table_admin - :members: - :inherited-members: - -.. automodule:: google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers - :members: - :inherited-members: diff --git a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/services_.rst b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/services_.rst deleted file mode 100644 index ea55c7da1..000000000 --- a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/services_.rst +++ /dev/null @@ -1,7 +0,0 @@ -Services for Google Cloud Bigtable Admin v2 API -=============================================== -.. toctree:: - :maxdepth: 2 - - bigtable_instance_admin - bigtable_table_admin diff --git a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/types_.rst b/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/types_.rst deleted file mode 100644 index 2f935927a..000000000 --- a/owl-bot-staging/bigtable_admin/v2/docs/bigtable_admin_v2/types_.rst +++ /dev/null @@ -1,6 +0,0 @@ -Types for Google Cloud Bigtable Admin v2 API -============================================ - -.. 
automodule:: google.cloud.bigtable_admin_v2.types - :members: - :show-inheritance: diff --git a/owl-bot-staging/bigtable_admin/v2/docs/conf.py b/owl-bot-staging/bigtable_admin/v2/docs/conf.py deleted file mode 100644 index 0846814bd..000000000 --- a/owl-bot-staging/bigtable_admin/v2/docs/conf.py +++ /dev/null @@ -1,376 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# -# google-cloud-bigtable-admin documentation build configuration file -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys -import os -import shlex - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath("..")) - -__version__ = "0.1.0" - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -needs_sphinx = "4.0.1" - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. 
-extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.intersphinx", - "sphinx.ext.coverage", - "sphinx.ext.napoleon", - "sphinx.ext.todo", - "sphinx.ext.viewcode", -] - -# autodoc/autosummary flags -autoclass_content = "both" -autodoc_default_flags = ["members"] -autosummary_generate = True - - -# Add any paths that contain templates here, relative to this directory. -templates_path = ["_templates"] - -# Allow markdown includes (so releases.md can include CHANGLEOG.md) -# http://www.sphinx-doc.org/en/master/markdown.html -source_parsers = {".md": "recommonmark.parser.CommonMarkParser"} - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -source_suffix = [".rst", ".md"] - -# The encoding of source files. -# source_encoding = 'utf-8-sig' - -# The root toctree document. -root_doc = "index" - -# General information about the project. -project = u"google-cloud-bigtable-admin" -copyright = u"2023, Google, LLC" -author = u"Google APIs" # TODO: autogenerate this bit - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The full version, including alpha/beta/rc tags. -release = __version__ -# The short X.Y version. -version = ".".join(release.split(".")[0:2]) - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = 'en' - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -# today = '' -# Else, today_fmt is used as the format for a strftime call. 
-# today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -exclude_patterns = ["_build"] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -# default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -# add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -# add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -# show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = "sphinx" - -# A list of ignored prefixes for module index sorting. -# modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -# keep_warnings = False - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = True - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = "alabaster" - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -html_theme_options = { - "description": "Google Cloud Client Libraries for Python", - "github_user": "googleapis", - "github_repo": "google-cloud-python", - "github_banner": True, - "font_family": "'Roboto', Georgia, sans", - "head_font_family": "'Roboto', Georgia, serif", - "code_font_family": "'Roboto Mono', 'Consolas', monospace", -} - -# Add any paths that contain custom themes here, relative to this directory. -# html_theme_path = [] - -# The name for this set of Sphinx documents. 
If None, it defaults to -# " v documentation". -# html_title = None - -# A shorter title for the navigation bar. Default is the same as html_title. -# html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -# html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -# html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ["_static"] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -# html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -# html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -# html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -# html_sidebars = {} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -# html_additional_pages = {} - -# If false, no module index is generated. -# html_domain_indices = True - -# If false, no index is generated. -# html_use_index = True - -# If true, the index is split into individual pages for each letter. -# html_split_index = False - -# If true, links to the reST sources are added to the pages. -# html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -# html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. 
Default is True. -# html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -# html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -# html_file_suffix = None - -# Language to be used for generating the HTML full-text search index. -# Sphinx supports the following languages: -# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja' -# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr' -# html_search_language = 'en' - -# A dictionary with options for the search language support, empty by default. -# Now only 'ja' uses this config value -# html_search_options = {'type': 'default'} - -# The name of a javascript file (relative to the configuration directory) that -# implements a search results scorer. If empty, the default will be used. -# html_search_scorer = 'scorer.js' - -# Output file base name for HTML help builder. -htmlhelp_basename = "google-cloud-bigtable-admin-doc" - -# -- Options for warnings ------------------------------------------------------ - - -suppress_warnings = [ - # Temporarily suppress this to avoid "more than one target found for - # cross-reference" warning, which are intractable for us to avoid while in - # a mono-repo. - # See https://github.com/sphinx-doc/sphinx/blob - # /2a65ffeef5c107c19084fabdd706cdff3f52d93c/sphinx/domains/python.py#L843 - "ref.python" -] - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # 'papersize': 'letterpaper', - # The font size ('10pt', '11pt' or '12pt'). - # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. - # 'preamble': '', - # Latex figure (float) alignment - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. 
List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ( - root_doc, - "google-cloud-bigtable-admin.tex", - u"google-cloud-bigtable-admin Documentation", - author, - "manual", - ) -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -# latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -# latex_use_parts = False - -# If true, show page references after internal links. -# latex_show_pagerefs = False - -# If true, show URL addresses after external links. -# latex_show_urls = False - -# Documents to append as an appendix to all manuals. -# latex_appendices = [] - -# If false, no module index is generated. -# latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ( - root_doc, - "google-cloud-bigtable-admin", - u"Google Cloud Bigtable Admin Documentation", - [author], - 1, - ) -] - -# If true, show URL addresses after external links. -# man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ( - root_doc, - "google-cloud-bigtable-admin", - u"google-cloud-bigtable-admin Documentation", - author, - "google-cloud-bigtable-admin", - "GAPIC library for Google Cloud Bigtable Admin API", - "APIs", - ) -] - -# Documents to append as an appendix to all manuals. -# texinfo_appendices = [] - -# If false, no module index is generated. -# texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. 
-# texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -# texinfo_no_detailmenu = False - - -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = { - "python": ("http://python.readthedocs.org/en/latest/", None), - "gax": ("https://gax-python.readthedocs.org/en/latest/", None), - "google-auth": ("https://google-auth.readthedocs.io/en/stable", None), - "google-gax": ("https://gax-python.readthedocs.io/en/latest/", None), - "google.api_core": ("https://googleapis.dev/python/google-api-core/latest/", None), - "grpc": ("https://grpc.io/grpc/python/", None), - "requests": ("http://requests.kennethreitz.org/en/stable/", None), - "proto": ("https://proto-plus-python.readthedocs.io/en/stable", None), - "protobuf": ("https://googleapis.dev/python/protobuf/latest/", None), -} - - -# Napoleon settings -napoleon_google_docstring = True -napoleon_numpy_docstring = True -napoleon_include_private_with_doc = False -napoleon_include_special_with_doc = True -napoleon_use_admonition_for_examples = False -napoleon_use_admonition_for_notes = False -napoleon_use_admonition_for_references = False -napoleon_use_ivar = False -napoleon_use_param = True -napoleon_use_rtype = True diff --git a/owl-bot-staging/bigtable_admin/v2/docs/index.rst b/owl-bot-staging/bigtable_admin/v2/docs/index.rst deleted file mode 100644 index ed3f64340..000000000 --- a/owl-bot-staging/bigtable_admin/v2/docs/index.rst +++ /dev/null @@ -1,7 +0,0 @@ -API Reference -------------- -.. 
toctree:: - :maxdepth: 2 - - bigtable_admin_v2/services - bigtable_admin_v2/types diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/__init__.py deleted file mode 100644 index 62b0303ba..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/__init__.py +++ /dev/null @@ -1,189 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from google.cloud.bigtable_admin import gapic_version as package_version - -__version__ = package_version.__version__ - - -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.client import BigtableInstanceAdminClient -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.async_client import BigtableInstanceAdminAsyncClient -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.client import BigtableTableAdminClient -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin.async_client import BigtableTableAdminAsyncClient - -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateAppProfileRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateClusterMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateClusterRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateInstanceMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import CreateInstanceRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import DeleteAppProfileRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import DeleteClusterRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import DeleteInstanceRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import GetAppProfileRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import GetClusterRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import GetInstanceRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListAppProfilesRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListAppProfilesResponse -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListClustersRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import 
ListClustersResponse -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListHotTabletsRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListHotTabletsResponse -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListInstancesRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import ListInstancesResponse -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import PartialUpdateClusterMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import PartialUpdateClusterRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import PartialUpdateInstanceRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import UpdateAppProfileMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import UpdateAppProfileRequest -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import UpdateClusterMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_instance_admin import UpdateInstanceMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CheckConsistencyRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CheckConsistencyResponse -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CopyBackupRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateBackupMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateBackupRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableFromSnapshotMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableFromSnapshotRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import CreateTableRequest -from 
google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DeleteBackupRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DeleteSnapshotRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DeleteTableRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import DropRowRangeRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GenerateConsistencyTokenRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GenerateConsistencyTokenResponse -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetBackupRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetSnapshotRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import GetTableRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListBackupsRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListBackupsResponse -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListSnapshotsRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListSnapshotsResponse -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ListTablesResponse -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import ModifyColumnFamiliesRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import OptimizeRestoredTableMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import RestoreTableMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import RestoreTableRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import SnapshotTableMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import SnapshotTableRequest -from 
google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UndeleteTableMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UndeleteTableRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UpdateBackupRequest -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UpdateTableMetadata -from google.cloud.bigtable_admin_v2.types.bigtable_table_admin import UpdateTableRequest -from google.cloud.bigtable_admin_v2.types.common import OperationProgress -from google.cloud.bigtable_admin_v2.types.common import StorageType -from google.cloud.bigtable_admin_v2.types.instance import AppProfile -from google.cloud.bigtable_admin_v2.types.instance import AutoscalingLimits -from google.cloud.bigtable_admin_v2.types.instance import AutoscalingTargets -from google.cloud.bigtable_admin_v2.types.instance import Cluster -from google.cloud.bigtable_admin_v2.types.instance import HotTablet -from google.cloud.bigtable_admin_v2.types.instance import Instance -from google.cloud.bigtable_admin_v2.types.table import Backup -from google.cloud.bigtable_admin_v2.types.table import BackupInfo -from google.cloud.bigtable_admin_v2.types.table import ChangeStreamConfig -from google.cloud.bigtable_admin_v2.types.table import ColumnFamily -from google.cloud.bigtable_admin_v2.types.table import EncryptionInfo -from google.cloud.bigtable_admin_v2.types.table import GcRule -from google.cloud.bigtable_admin_v2.types.table import RestoreInfo -from google.cloud.bigtable_admin_v2.types.table import Snapshot -from google.cloud.bigtable_admin_v2.types.table import Table -from google.cloud.bigtable_admin_v2.types.table import RestoreSourceType - -__all__ = ('BigtableInstanceAdminClient', - 'BigtableInstanceAdminAsyncClient', - 'BigtableTableAdminClient', - 'BigtableTableAdminAsyncClient', - 'CreateAppProfileRequest', - 'CreateClusterMetadata', - 'CreateClusterRequest', - 'CreateInstanceMetadata', - 'CreateInstanceRequest', - 
'DeleteAppProfileRequest', - 'DeleteClusterRequest', - 'DeleteInstanceRequest', - 'GetAppProfileRequest', - 'GetClusterRequest', - 'GetInstanceRequest', - 'ListAppProfilesRequest', - 'ListAppProfilesResponse', - 'ListClustersRequest', - 'ListClustersResponse', - 'ListHotTabletsRequest', - 'ListHotTabletsResponse', - 'ListInstancesRequest', - 'ListInstancesResponse', - 'PartialUpdateClusterMetadata', - 'PartialUpdateClusterRequest', - 'PartialUpdateInstanceRequest', - 'UpdateAppProfileMetadata', - 'UpdateAppProfileRequest', - 'UpdateClusterMetadata', - 'UpdateInstanceMetadata', - 'CheckConsistencyRequest', - 'CheckConsistencyResponse', - 'CopyBackupMetadata', - 'CopyBackupRequest', - 'CreateBackupMetadata', - 'CreateBackupRequest', - 'CreateTableFromSnapshotMetadata', - 'CreateTableFromSnapshotRequest', - 'CreateTableRequest', - 'DeleteBackupRequest', - 'DeleteSnapshotRequest', - 'DeleteTableRequest', - 'DropRowRangeRequest', - 'GenerateConsistencyTokenRequest', - 'GenerateConsistencyTokenResponse', - 'GetBackupRequest', - 'GetSnapshotRequest', - 'GetTableRequest', - 'ListBackupsRequest', - 'ListBackupsResponse', - 'ListSnapshotsRequest', - 'ListSnapshotsResponse', - 'ListTablesRequest', - 'ListTablesResponse', - 'ModifyColumnFamiliesRequest', - 'OptimizeRestoredTableMetadata', - 'RestoreTableMetadata', - 'RestoreTableRequest', - 'SnapshotTableMetadata', - 'SnapshotTableRequest', - 'UndeleteTableMetadata', - 'UndeleteTableRequest', - 'UpdateBackupRequest', - 'UpdateTableMetadata', - 'UpdateTableRequest', - 'OperationProgress', - 'StorageType', - 'AppProfile', - 'AutoscalingLimits', - 'AutoscalingTargets', - 'Cluster', - 'HotTablet', - 'Instance', - 'Backup', - 'BackupInfo', - 'ChangeStreamConfig', - 'ColumnFamily', - 'EncryptionInfo', - 'GcRule', - 'RestoreInfo', - 'Snapshot', - 'Table', - 'RestoreSourceType', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/gapic_version.py 
b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/gapic_version.py deleted file mode 100644 index 360a0d13e..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/gapic_version.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/py.typed b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/py.typed deleted file mode 100644 index bc26f2069..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-bigtable-admin package uses inline types. diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/__init__.py deleted file mode 100644 index 97f640900..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/__init__.py +++ /dev/null @@ -1,190 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -__version__ = package_version.__version__ - - -from .services.bigtable_instance_admin import BigtableInstanceAdminClient -from .services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient -from .services.bigtable_table_admin import BigtableTableAdminClient -from .services.bigtable_table_admin import BigtableTableAdminAsyncClient - -from .types.bigtable_instance_admin import CreateAppProfileRequest -from .types.bigtable_instance_admin import CreateClusterMetadata -from .types.bigtable_instance_admin import CreateClusterRequest -from .types.bigtable_instance_admin import CreateInstanceMetadata -from .types.bigtable_instance_admin import CreateInstanceRequest -from .types.bigtable_instance_admin import DeleteAppProfileRequest -from .types.bigtable_instance_admin import DeleteClusterRequest -from .types.bigtable_instance_admin import DeleteInstanceRequest -from .types.bigtable_instance_admin import GetAppProfileRequest -from .types.bigtable_instance_admin import GetClusterRequest -from .types.bigtable_instance_admin import GetInstanceRequest -from .types.bigtable_instance_admin import ListAppProfilesRequest -from .types.bigtable_instance_admin import ListAppProfilesResponse -from .types.bigtable_instance_admin import ListClustersRequest -from .types.bigtable_instance_admin import ListClustersResponse -from .types.bigtable_instance_admin import ListHotTabletsRequest -from .types.bigtable_instance_admin import ListHotTabletsResponse -from .types.bigtable_instance_admin 
import ListInstancesRequest -from .types.bigtable_instance_admin import ListInstancesResponse -from .types.bigtable_instance_admin import PartialUpdateClusterMetadata -from .types.bigtable_instance_admin import PartialUpdateClusterRequest -from .types.bigtable_instance_admin import PartialUpdateInstanceRequest -from .types.bigtable_instance_admin import UpdateAppProfileMetadata -from .types.bigtable_instance_admin import UpdateAppProfileRequest -from .types.bigtable_instance_admin import UpdateClusterMetadata -from .types.bigtable_instance_admin import UpdateInstanceMetadata -from .types.bigtable_table_admin import CheckConsistencyRequest -from .types.bigtable_table_admin import CheckConsistencyResponse -from .types.bigtable_table_admin import CopyBackupMetadata -from .types.bigtable_table_admin import CopyBackupRequest -from .types.bigtable_table_admin import CreateBackupMetadata -from .types.bigtable_table_admin import CreateBackupRequest -from .types.bigtable_table_admin import CreateTableFromSnapshotMetadata -from .types.bigtable_table_admin import CreateTableFromSnapshotRequest -from .types.bigtable_table_admin import CreateTableRequest -from .types.bigtable_table_admin import DeleteBackupRequest -from .types.bigtable_table_admin import DeleteSnapshotRequest -from .types.bigtable_table_admin import DeleteTableRequest -from .types.bigtable_table_admin import DropRowRangeRequest -from .types.bigtable_table_admin import GenerateConsistencyTokenRequest -from .types.bigtable_table_admin import GenerateConsistencyTokenResponse -from .types.bigtable_table_admin import GetBackupRequest -from .types.bigtable_table_admin import GetSnapshotRequest -from .types.bigtable_table_admin import GetTableRequest -from .types.bigtable_table_admin import ListBackupsRequest -from .types.bigtable_table_admin import ListBackupsResponse -from .types.bigtable_table_admin import ListSnapshotsRequest -from .types.bigtable_table_admin import ListSnapshotsResponse -from 
.types.bigtable_table_admin import ListTablesRequest -from .types.bigtable_table_admin import ListTablesResponse -from .types.bigtable_table_admin import ModifyColumnFamiliesRequest -from .types.bigtable_table_admin import OptimizeRestoredTableMetadata -from .types.bigtable_table_admin import RestoreTableMetadata -from .types.bigtable_table_admin import RestoreTableRequest -from .types.bigtable_table_admin import SnapshotTableMetadata -from .types.bigtable_table_admin import SnapshotTableRequest -from .types.bigtable_table_admin import UndeleteTableMetadata -from .types.bigtable_table_admin import UndeleteTableRequest -from .types.bigtable_table_admin import UpdateBackupRequest -from .types.bigtable_table_admin import UpdateTableMetadata -from .types.bigtable_table_admin import UpdateTableRequest -from .types.common import OperationProgress -from .types.common import StorageType -from .types.instance import AppProfile -from .types.instance import AutoscalingLimits -from .types.instance import AutoscalingTargets -from .types.instance import Cluster -from .types.instance import HotTablet -from .types.instance import Instance -from .types.table import Backup -from .types.table import BackupInfo -from .types.table import ChangeStreamConfig -from .types.table import ColumnFamily -from .types.table import EncryptionInfo -from .types.table import GcRule -from .types.table import RestoreInfo -from .types.table import Snapshot -from .types.table import Table -from .types.table import RestoreSourceType - -__all__ = ( - 'BigtableInstanceAdminAsyncClient', - 'BigtableTableAdminAsyncClient', -'AppProfile', -'AutoscalingLimits', -'AutoscalingTargets', -'Backup', -'BackupInfo', -'BigtableInstanceAdminClient', -'BigtableTableAdminClient', -'ChangeStreamConfig', -'CheckConsistencyRequest', -'CheckConsistencyResponse', -'Cluster', -'ColumnFamily', -'CopyBackupMetadata', -'CopyBackupRequest', -'CreateAppProfileRequest', -'CreateBackupMetadata', -'CreateBackupRequest', 
-'CreateClusterMetadata', -'CreateClusterRequest', -'CreateInstanceMetadata', -'CreateInstanceRequest', -'CreateTableFromSnapshotMetadata', -'CreateTableFromSnapshotRequest', -'CreateTableRequest', -'DeleteAppProfileRequest', -'DeleteBackupRequest', -'DeleteClusterRequest', -'DeleteInstanceRequest', -'DeleteSnapshotRequest', -'DeleteTableRequest', -'DropRowRangeRequest', -'EncryptionInfo', -'GcRule', -'GenerateConsistencyTokenRequest', -'GenerateConsistencyTokenResponse', -'GetAppProfileRequest', -'GetBackupRequest', -'GetClusterRequest', -'GetInstanceRequest', -'GetSnapshotRequest', -'GetTableRequest', -'HotTablet', -'Instance', -'ListAppProfilesRequest', -'ListAppProfilesResponse', -'ListBackupsRequest', -'ListBackupsResponse', -'ListClustersRequest', -'ListClustersResponse', -'ListHotTabletsRequest', -'ListHotTabletsResponse', -'ListInstancesRequest', -'ListInstancesResponse', -'ListSnapshotsRequest', -'ListSnapshotsResponse', -'ListTablesRequest', -'ListTablesResponse', -'ModifyColumnFamiliesRequest', -'OperationProgress', -'OptimizeRestoredTableMetadata', -'PartialUpdateClusterMetadata', -'PartialUpdateClusterRequest', -'PartialUpdateInstanceRequest', -'RestoreInfo', -'RestoreSourceType', -'RestoreTableMetadata', -'RestoreTableRequest', -'Snapshot', -'SnapshotTableMetadata', -'SnapshotTableRequest', -'StorageType', -'Table', -'UndeleteTableMetadata', -'UndeleteTableRequest', -'UpdateAppProfileMetadata', -'UpdateAppProfileRequest', -'UpdateBackupRequest', -'UpdateClusterMetadata', -'UpdateInstanceMetadata', -'UpdateTableMetadata', -'UpdateTableRequest', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_metadata.json b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_metadata.json deleted file mode 100644 index 9b3426470..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_metadata.json +++ /dev/null @@ -1,737 +0,0 @@ - { - "comment": "This file maps proto services/RPCs 
to the corresponding library clients/methods", - "language": "python", - "libraryPackage": "google.cloud.bigtable_admin_v2", - "protoPackage": "google.bigtable.admin.v2", - "schema": "1.0", - "services": { - "BigtableInstanceAdmin": { - "clients": { - "grpc": { - "libraryClient": "BigtableInstanceAdminClient", - "rpcs": { - "CreateAppProfile": { - "methods": [ - "create_app_profile" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateInstance": { - "methods": [ - "create_instance" - ] - }, - "DeleteAppProfile": { - "methods": [ - "delete_app_profile" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteInstance": { - "methods": [ - "delete_instance" - ] - }, - "GetAppProfile": { - "methods": [ - "get_app_profile" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetInstance": { - "methods": [ - "get_instance" - ] - }, - "ListAppProfiles": { - "methods": [ - "list_app_profiles" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "ListHotTablets": { - "methods": [ - "list_hot_tablets" - ] - }, - "ListInstances": { - "methods": [ - "list_instances" - ] - }, - "PartialUpdateCluster": { - "methods": [ - "partial_update_cluster" - ] - }, - "PartialUpdateInstance": { - "methods": [ - "partial_update_instance" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UpdateAppProfile": { - "methods": [ - "update_app_profile" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateInstance": { - "methods": [ - "update_instance" - ] - } - } - }, - "grpc-async": { - "libraryClient": "BigtableInstanceAdminAsyncClient", - "rpcs": { - "CreateAppProfile": { - "methods": [ - "create_app_profile" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateInstance": { - "methods": [ - 
"create_instance" - ] - }, - "DeleteAppProfile": { - "methods": [ - "delete_app_profile" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteInstance": { - "methods": [ - "delete_instance" - ] - }, - "GetAppProfile": { - "methods": [ - "get_app_profile" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetInstance": { - "methods": [ - "get_instance" - ] - }, - "ListAppProfiles": { - "methods": [ - "list_app_profiles" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "ListHotTablets": { - "methods": [ - "list_hot_tablets" - ] - }, - "ListInstances": { - "methods": [ - "list_instances" - ] - }, - "PartialUpdateCluster": { - "methods": [ - "partial_update_cluster" - ] - }, - "PartialUpdateInstance": { - "methods": [ - "partial_update_instance" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UpdateAppProfile": { - "methods": [ - "update_app_profile" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateInstance": { - "methods": [ - "update_instance" - ] - } - } - }, - "rest": { - "libraryClient": "BigtableInstanceAdminClient", - "rpcs": { - "CreateAppProfile": { - "methods": [ - "create_app_profile" - ] - }, - "CreateCluster": { - "methods": [ - "create_cluster" - ] - }, - "CreateInstance": { - "methods": [ - "create_instance" - ] - }, - "DeleteAppProfile": { - "methods": [ - "delete_app_profile" - ] - }, - "DeleteCluster": { - "methods": [ - "delete_cluster" - ] - }, - "DeleteInstance": { - "methods": [ - "delete_instance" - ] - }, - "GetAppProfile": { - "methods": [ - "get_app_profile" - ] - }, - "GetCluster": { - "methods": [ - "get_cluster" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetInstance": { - "methods": [ - "get_instance" - ] - }, - "ListAppProfiles": { - 
"methods": [ - "list_app_profiles" - ] - }, - "ListClusters": { - "methods": [ - "list_clusters" - ] - }, - "ListHotTablets": { - "methods": [ - "list_hot_tablets" - ] - }, - "ListInstances": { - "methods": [ - "list_instances" - ] - }, - "PartialUpdateCluster": { - "methods": [ - "partial_update_cluster" - ] - }, - "PartialUpdateInstance": { - "methods": [ - "partial_update_instance" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UpdateAppProfile": { - "methods": [ - "update_app_profile" - ] - }, - "UpdateCluster": { - "methods": [ - "update_cluster" - ] - }, - "UpdateInstance": { - "methods": [ - "update_instance" - ] - } - } - } - } - }, - "BigtableTableAdmin": { - "clients": { - "grpc": { - "libraryClient": "BigtableTableAdminClient", - "rpcs": { - "CheckConsistency": { - "methods": [ - "check_consistency" - ] - }, - "CopyBackup": { - "methods": [ - "copy_backup" - ] - }, - "CreateBackup": { - "methods": [ - "create_backup" - ] - }, - "CreateTable": { - "methods": [ - "create_table" - ] - }, - "CreateTableFromSnapshot": { - "methods": [ - "create_table_from_snapshot" - ] - }, - "DeleteBackup": { - "methods": [ - "delete_backup" - ] - }, - "DeleteSnapshot": { - "methods": [ - "delete_snapshot" - ] - }, - "DeleteTable": { - "methods": [ - "delete_table" - ] - }, - "DropRowRange": { - "methods": [ - "drop_row_range" - ] - }, - "GenerateConsistencyToken": { - "methods": [ - "generate_consistency_token" - ] - }, - "GetBackup": { - "methods": [ - "get_backup" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetSnapshot": { - "methods": [ - "get_snapshot" - ] - }, - "GetTable": { - "methods": [ - "get_table" - ] - }, - "ListBackups": { - "methods": [ - "list_backups" - ] - }, - "ListSnapshots": { - "methods": [ - "list_snapshots" - ] - }, - "ListTables": { - "methods": [ - "list_tables" - ] - }, - "ModifyColumnFamilies": { - "methods": [ - 
"modify_column_families" - ] - }, - "RestoreTable": { - "methods": [ - "restore_table" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "SnapshotTable": { - "methods": [ - "snapshot_table" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UndeleteTable": { - "methods": [ - "undelete_table" - ] - }, - "UpdateBackup": { - "methods": [ - "update_backup" - ] - }, - "UpdateTable": { - "methods": [ - "update_table" - ] - } - } - }, - "grpc-async": { - "libraryClient": "BigtableTableAdminAsyncClient", - "rpcs": { - "CheckConsistency": { - "methods": [ - "check_consistency" - ] - }, - "CopyBackup": { - "methods": [ - "copy_backup" - ] - }, - "CreateBackup": { - "methods": [ - "create_backup" - ] - }, - "CreateTable": { - "methods": [ - "create_table" - ] - }, - "CreateTableFromSnapshot": { - "methods": [ - "create_table_from_snapshot" - ] - }, - "DeleteBackup": { - "methods": [ - "delete_backup" - ] - }, - "DeleteSnapshot": { - "methods": [ - "delete_snapshot" - ] - }, - "DeleteTable": { - "methods": [ - "delete_table" - ] - }, - "DropRowRange": { - "methods": [ - "drop_row_range" - ] - }, - "GenerateConsistencyToken": { - "methods": [ - "generate_consistency_token" - ] - }, - "GetBackup": { - "methods": [ - "get_backup" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetSnapshot": { - "methods": [ - "get_snapshot" - ] - }, - "GetTable": { - "methods": [ - "get_table" - ] - }, - "ListBackups": { - "methods": [ - "list_backups" - ] - }, - "ListSnapshots": { - "methods": [ - "list_snapshots" - ] - }, - "ListTables": { - "methods": [ - "list_tables" - ] - }, - "ModifyColumnFamilies": { - "methods": [ - "modify_column_families" - ] - }, - "RestoreTable": { - "methods": [ - "restore_table" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "SnapshotTable": { - "methods": [ - "snapshot_table" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - 
] - }, - "UndeleteTable": { - "methods": [ - "undelete_table" - ] - }, - "UpdateBackup": { - "methods": [ - "update_backup" - ] - }, - "UpdateTable": { - "methods": [ - "update_table" - ] - } - } - }, - "rest": { - "libraryClient": "BigtableTableAdminClient", - "rpcs": { - "CheckConsistency": { - "methods": [ - "check_consistency" - ] - }, - "CopyBackup": { - "methods": [ - "copy_backup" - ] - }, - "CreateBackup": { - "methods": [ - "create_backup" - ] - }, - "CreateTable": { - "methods": [ - "create_table" - ] - }, - "CreateTableFromSnapshot": { - "methods": [ - "create_table_from_snapshot" - ] - }, - "DeleteBackup": { - "methods": [ - "delete_backup" - ] - }, - "DeleteSnapshot": { - "methods": [ - "delete_snapshot" - ] - }, - "DeleteTable": { - "methods": [ - "delete_table" - ] - }, - "DropRowRange": { - "methods": [ - "drop_row_range" - ] - }, - "GenerateConsistencyToken": { - "methods": [ - "generate_consistency_token" - ] - }, - "GetBackup": { - "methods": [ - "get_backup" - ] - }, - "GetIamPolicy": { - "methods": [ - "get_iam_policy" - ] - }, - "GetSnapshot": { - "methods": [ - "get_snapshot" - ] - }, - "GetTable": { - "methods": [ - "get_table" - ] - }, - "ListBackups": { - "methods": [ - "list_backups" - ] - }, - "ListSnapshots": { - "methods": [ - "list_snapshots" - ] - }, - "ListTables": { - "methods": [ - "list_tables" - ] - }, - "ModifyColumnFamilies": { - "methods": [ - "modify_column_families" - ] - }, - "RestoreTable": { - "methods": [ - "restore_table" - ] - }, - "SetIamPolicy": { - "methods": [ - "set_iam_policy" - ] - }, - "SnapshotTable": { - "methods": [ - "snapshot_table" - ] - }, - "TestIamPermissions": { - "methods": [ - "test_iam_permissions" - ] - }, - "UndeleteTable": { - "methods": [ - "undelete_table" - ] - }, - "UpdateBackup": { - "methods": [ - "update_backup" - ] - }, - "UpdateTable": { - "methods": [ - "update_table" - ] - } - } - } - } - } - } -} diff --git 
a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_version.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_version.py deleted file mode 100644 index 360a0d13e..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/gapic_version.py +++ /dev/null @@ -1,16 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -__version__ = "0.0.0" # {x-release-please-version} diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/py.typed b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/py.typed deleted file mode 100644 index bc26f2069..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/py.typed +++ /dev/null @@ -1,2 +0,0 @@ -# Marker file for PEP 561. -# The google-cloud-bigtable-admin package uses inline types. diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/__init__.py deleted file mode 100644 index 89a37dc92..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/__init__.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py deleted file mode 100644 index 2ff9a47f4..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import BigtableInstanceAdminClient -from .async_client import BigtableInstanceAdminAsyncClient - -__all__ = ( - 'BigtableInstanceAdminClient', - 'BigtableInstanceAdminAsyncClient', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py deleted file mode 100644 index 591ead4ea..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/async_client.py +++ /dev/null @@ -1,2269 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry_async as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import common -from google.cloud.bigtable_admin_v2.types import instance -from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport -from .client import BigtableInstanceAdminClient - - -class BigtableInstanceAdminAsyncClient: - """Service for creating, configuring, and deleting Cloud - Bigtable Instances and Clusters. Provides access to the Instance - and Cluster schemas only, not the tables' metadata or data - stored in those tables. 
- """ - - _client: BigtableInstanceAdminClient - - # Copy defaults from the synchronous client for use here. - # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. - DEFAULT_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - _DEFAULT_ENDPOINT_TEMPLATE = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE - _DEFAULT_UNIVERSE = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - - app_profile_path = staticmethod(BigtableInstanceAdminClient.app_profile_path) - parse_app_profile_path = staticmethod(BigtableInstanceAdminClient.parse_app_profile_path) - cluster_path = staticmethod(BigtableInstanceAdminClient.cluster_path) - parse_cluster_path = staticmethod(BigtableInstanceAdminClient.parse_cluster_path) - crypto_key_path = staticmethod(BigtableInstanceAdminClient.crypto_key_path) - parse_crypto_key_path = staticmethod(BigtableInstanceAdminClient.parse_crypto_key_path) - hot_tablet_path = staticmethod(BigtableInstanceAdminClient.hot_tablet_path) - parse_hot_tablet_path = staticmethod(BigtableInstanceAdminClient.parse_hot_tablet_path) - instance_path = staticmethod(BigtableInstanceAdminClient.instance_path) - parse_instance_path = staticmethod(BigtableInstanceAdminClient.parse_instance_path) - table_path = staticmethod(BigtableInstanceAdminClient.table_path) - parse_table_path = staticmethod(BigtableInstanceAdminClient.parse_table_path) - common_billing_account_path = staticmethod(BigtableInstanceAdminClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(BigtableInstanceAdminClient.parse_common_billing_account_path) - common_folder_path = staticmethod(BigtableInstanceAdminClient.common_folder_path) - parse_common_folder_path = staticmethod(BigtableInstanceAdminClient.parse_common_folder_path) - common_organization_path = staticmethod(BigtableInstanceAdminClient.common_organization_path) - parse_common_organization_path = 
staticmethod(BigtableInstanceAdminClient.parse_common_organization_path) - common_project_path = staticmethod(BigtableInstanceAdminClient.common_project_path) - parse_common_project_path = staticmethod(BigtableInstanceAdminClient.parse_common_project_path) - common_location_path = staticmethod(BigtableInstanceAdminClient.common_location_path) - parse_common_location_path = staticmethod(BigtableInstanceAdminClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminAsyncClient: The constructed client. - """ - return BigtableInstanceAdminClient.from_service_account_info.__func__(BigtableInstanceAdminAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminAsyncClient: The constructed client. - """ - return BigtableInstanceAdminClient.from_service_account_file.__func__(BigtableInstanceAdminAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. 
- - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - return BigtableInstanceAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore - - @property - def transport(self) -> BigtableInstanceAdminTransport: - """Returns the transport used by the client instance. - - Returns: - BigtableInstanceAdminTransport: The transport used by the client instance. - """ - return self._client.transport - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._client._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. 
- - Returns: - str: The universe domain used - by the client instance. - """ - return self._client._universe_domain - - get_transport_class = functools.partial(type(BigtableInstanceAdminClient).get_transport_class, type(BigtableInstanceAdminClient)) - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableInstanceAdminTransport] = "grpc_asyncio", - client_options: Optional[ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the bigtable instance admin async client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.BigtableInstanceAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. 
If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = BigtableInstanceAdminClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_instance(self, - request: Optional[Union[bigtable_instance_admin.CreateInstanceRequest, dict]] = None, - *, - parent: Optional[str] = None, - instance_id: Optional[str] = None, - instance: Optional[gba_instance.Instance] = None, - clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Create an instance within a project. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.CreateInstance. 
- parent (:class:`str`): - Required. The unique name of the project in which to - create the new instance. Values are of the form - ``projects/{project}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instance_id (:class:`str`): - Required. The ID to be used when referring to the new - instance within its project, e.g., just ``myinstance`` - rather than ``projects/myproject/instances/myinstance``. - - This corresponds to the ``instance_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): - Required. The instance to create. Fields marked - ``OutputOnly`` must be left blank. - - This corresponds to the ``instance`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - clusters (:class:`MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]`): - Required. The clusters to be created within the - instance, mapped by desired cluster ID, e.g., just - ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. - Currently, at most four clusters can be specified. - - This corresponds to the ``clusters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, instance_id, instance, clusters]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.CreateInstanceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if instance_id is not None: - request.instance_id = instance_id - if instance is not None: - request.instance = instance - - if clusters: - request.clusters.update(clusters) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_instance, - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gba_instance.Instance, - metadata_type=bigtable_instance_admin.CreateInstanceMetadata, - ) - - # Done; return the response. - return response - - async def get_instance(self, - request: Optional[Union[bigtable_instance_admin.GetInstanceRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.Instance: - r"""Gets information about an instance. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.GetInstance. - name (:class:`str`): - Required. The unique name of the requested instance. - Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Instance: - A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.GetInstanceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_instance, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_instances(self, - request: Optional[Union[bigtable_instance_admin.ListInstancesRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_instance_admin.ListInstancesResponse: - r"""Lists information about instances in a project. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.ListInstances. - parent (:class:`str`): - Required. 
The unique name of the project for which a - list of instances is requested. Values are of the form - ``projects/{project}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.ListInstancesResponse: - Response message for - BigtableInstanceAdmin.ListInstances. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.ListInstancesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_instances, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. 
- self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_instance(self, - request: Optional[Union[instance.Instance, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.Instance: - r"""Updates an instance within a project. This method - updates only the display name and type for an Instance. - To update other Instance properties, such as labels, use - PartialUpdateInstance. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.Instance, dict]]): - The request object. A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Instance: - A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - request = instance.Instance(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_instance, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def partial_update_instance(self, - request: Optional[Union[bigtable_instance_admin.PartialUpdateInstanceRequest, dict]] = None, - *, - instance: Optional[gba_instance.Instance] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Partially updates an instance within a project. This - method can modify all fields of an Instance and is the - preferred way to update an Instance. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - instance (:class:`google.cloud.bigtable_admin_v2.types.Instance`): - Required. The Instance which will - (partially) replace the current value. - - This corresponds to the ``instance`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. 
The subset of Instance - fields which should be replaced. Must be - explicitly set. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([instance, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if instance is not None: - request.instance = instance - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.partial_update_instance, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("instance.name", request.instance.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gba_instance.Instance, - metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, - ) - - # Done; return the response. - return response - - async def delete_instance(self, - request: Optional[Union[bigtable_instance_admin.DeleteInstanceRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Delete an instance from a project. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.DeleteInstance. - name (:class:`str`): - Required. The unique name of the instance to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.DeleteInstanceRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_instance, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_cluster(self, - request: Optional[Union[bigtable_instance_admin.CreateClusterRequest, dict]] = None, - *, - parent: Optional[str] = None, - cluster_id: Optional[str] = None, - cluster: Optional[instance.Cluster] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a cluster within an instance. 
- - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.CreateCluster. - parent (:class:`str`): - Required. The unique name of the instance in which to - create the new cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (:class:`str`): - Required. The ID to be used when referring to the new - cluster within its instance, e.g., just ``mycluster`` - rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): - Required. The cluster to be created. Fields marked - ``OutputOnly`` must be left blank. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, cluster_id, cluster]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.CreateClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if cluster_id is not None: - request.cluster_id = cluster_id - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_cluster, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.CreateClusterMetadata, - ) - - # Done; return the response. 
- return response - - async def get_cluster(self, - request: Optional[Union[bigtable_instance_admin.GetClusterRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.Cluster: - r"""Gets information about a cluster. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.GetCluster. - name (:class:`str`): - Required. The unique name of the requested cluster. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Cluster: - A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.GetClusterRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_cluster, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_clusters(self, - request: Optional[Union[bigtable_instance_admin.ListClustersRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_instance_admin.ListClustersResponse: - r"""Lists information about clusters in an instance. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.ListClusters. - parent (:class:`str`): - Required. The unique name of the instance for which a - list of clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list Clusters for all Instances - in a project, e.g., ``projects/myproject/instances/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.ListClustersResponse: - Response message for - BigtableInstanceAdmin.ListClusters. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.ListClustersRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_clusters, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_cluster(self, - request: Optional[Union[instance.Cluster, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a cluster within an instance. - - Note that UpdateCluster does not support updating - cluster_config.cluster_autoscaling_config. In order to update - it, you must use PartialUpdateCluster. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]]): - The request object. A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - request = instance.Cluster(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_cluster, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.UpdateClusterMetadata, - ) - - # Done; return the response. - return response - - async def partial_update_cluster(self, - request: Optional[Union[bigtable_instance_admin.PartialUpdateClusterRequest, dict]] = None, - *, - cluster: Optional[instance.Cluster] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Partially updates a cluster within a project. This method is the - preferred way to update a Cluster. - - To enable and update autoscaling, set - cluster_config.cluster_autoscaling_config. When autoscaling is - enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning - that updates to it are ignored. Note that an update cannot - simultaneously set serve_nodes to non-zero and - cluster_config.cluster_autoscaling_config to non-empty, and also - specify both in the update_mask. 
- - To disable autoscaling, clear - cluster_config.cluster_autoscaling_config, and explicitly set a - serve_node count via the update_mask. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateCluster. - cluster (:class:`google.cloud.bigtable_admin_v2.types.Cluster`): - Required. The Cluster which contains the partial updates - to be applied, subject to the update_mask. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The subset of Cluster - fields which should be replaced. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
    async def delete_cluster(self,
            request: Optional[Union[bigtable_instance_admin.DeleteClusterRequest, dict]] = None,
            *,
            name: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> None:
        r"""Deletes a cluster from an instance.

        Args:
            request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]]):
                The request object. Request message for
                BigtableInstanceAdmin.DeleteCluster.
            name (:class:`str`):
                Required. The unique name of the cluster to be deleted.
                Values are of the form
                ``projects/{project}/instances/{instance}/clusters/{cluster}``.

                This corresponds to the ``name`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([name])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = bigtable_instance_admin.DeleteClusterRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if name is not None:
            request.name = name

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # No default retry is configured (deletes are not retried by
        # default); only a 60s default timeout applies.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.delete_cluster,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("name", request.name),
            )),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )
    async def create_app_profile(self,
            request: Optional[Union[bigtable_instance_admin.CreateAppProfileRequest, dict]] = None,
            *,
            parent: Optional[str] = None,
            app_profile_id: Optional[str] = None,
            app_profile: Optional[instance.AppProfile] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> instance.AppProfile:
        r"""Creates an app profile within an instance.

        Args:
            request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]]):
                The request object. Request message for
                BigtableInstanceAdmin.CreateAppProfile.
            parent (:class:`str`):
                Required. The unique name of the instance in which to
                create the new app profile. Values are of the form
                ``projects/{project}/instances/{instance}``.

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            app_profile_id (:class:`str`):
                Required. The ID to be used when referring to the new
                app profile within its instance, e.g., just
                ``myprofile`` rather than
                ``projects/myproject/instances/myinstance/appProfiles/myprofile``.

                This corresponds to the ``app_profile_id`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`):
                Required. The app profile to be created. Fields marked
                ``OutputOnly`` will be ignored.

                This corresponds to the ``app_profile`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.bigtable_admin_v2.types.AppProfile:
                A configuration object describing how
                Cloud Bigtable should treat traffic from
                a particular end user application.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, app_profile_id, app_profile])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = bigtable_instance_admin.CreateAppProfileRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if app_profile_id is not None:
            request.app_profile_id = app_profile_id
        if app_profile is not None:
            request.app_profile = app_profile

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # No default retry is configured for this (non-idempotent) create;
        # a 60s default timeout applies.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.create_app_profile,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
- return response - - async def get_app_profile(self, - request: Optional[Union[bigtable_instance_admin.GetAppProfileRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.AppProfile: - r"""Gets information about an app profile. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.GetAppProfile. - name (:class:`str`): - Required. The unique name of the requested app profile. - Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.GetAppProfileRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
    async def list_app_profiles(self,
            request: Optional[Union[bigtable_instance_admin.ListAppProfilesRequest, dict]] = None,
            *,
            parent: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> pagers.ListAppProfilesAsyncPager:
        r"""Lists information about app profiles in an instance.

        Args:
            request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]]):
                The request object. Request message for
                BigtableInstanceAdmin.ListAppProfiles.
            parent (:class:`str`):
                Required. The unique name of the instance for which a
                list of app profiles is requested. Values are of the
                form ``projects/{project}/instances/{instance}``. Use
                ``{instance} = '-'`` to list AppProfiles for all
                Instances in a project, e.g.,
                ``projects/myproject/instances/-``.

                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesAsyncPager:
                Response message for
                BigtableInstanceAdmin.ListAppProfiles.
                Iterating over this object will yield
                results and resolve additional pages
                automatically.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = bigtable_instance_admin.ListAppProfilesRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # Idempotent read: retried by default on DEADLINE_EXCEEDED and
        # UNAVAILABLE, with exponential backoff up to a 60s deadline.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.list_app_profiles,
            default_retry=retries.AsyncRetry(
                initial=1.0,
                maximum=60.0,
                multiplier=2,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("parent", request.parent),
            )),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # This method is paged; wrap the response in a pager, which provides
        # an `__aiter__` convenience method.
        response = pagers.ListAppProfilesAsyncPager(
            method=rpc,
            request=request,
            response=response,
            metadata=metadata,
        )

        # Done; return the response.
        return response
    async def update_app_profile(self,
            request: Optional[Union[bigtable_instance_admin.UpdateAppProfileRequest, dict]] = None,
            *,
            app_profile: Optional[instance.AppProfile] = None,
            update_mask: Optional[field_mask_pb2.FieldMask] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> operation_async.AsyncOperation:
        r"""Updates an app profile within an instance.

        Args:
            request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]]):
                The request object. Request message for
                BigtableInstanceAdmin.UpdateAppProfile.
            app_profile (:class:`google.cloud.bigtable_admin_v2.types.AppProfile`):
                Required. The app profile which will
                (partially) replace the current value.

                This corresponds to the ``app_profile`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`):
                Required. The subset of app profile
                fields which should be replaced. If
                unset, all fields will be replaced.

                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.api_core.operation_async.AsyncOperation:
                An object representing a long-running operation.

                The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic
                from a particular end user application.

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([app_profile, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        request = bigtable_instance_admin.UpdateAppProfileRequest(request)

        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if app_profile is not None:
            request.app_profile = app_profile
        if update_mask is not None:
            request.update_mask = update_mask

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # Retried by default on DEADLINE_EXCEEDED and UNAVAILABLE, with
        # exponential backoff up to a 60s deadline.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.update_app_profile,
            default_retry=retries.AsyncRetry(
                initial=1.0,
                maximum=60.0,
                multiplier=2,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("app_profile.name", request.app_profile.name),
            )),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Wrap the response in an operation future.
        response = operation_async.from_gapic(
            response,
            self._client._transport.operations_client,
            instance.AppProfile,
            metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata,
        )

        # Done; return the response.
        return response
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - instance.AppProfile, - metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, - ) - - # Done; return the response. - return response - - async def delete_app_profile(self, - request: Optional[Union[bigtable_instance_admin.DeleteAppProfileRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes an app profile from an instance. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]]): - The request object. Request message for - BigtableInstanceAdmin.DeleteAppProfile. - name (:class:`str`): - Required. The unique name of the app profile to be - deleted. Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.DeleteAppProfileRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
    async def get_iam_policy(self,
            request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None,
            *,
            resource: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> policy_pb2.Policy:
        r"""Gets the access control policy for an instance
        resource. Returns an empty policy if an instance exists
        but does not have a policy set.

        Args:
            request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]):
                The request object. Request message for ``GetIamPolicy`` method.
            resource (:class:`str`):
                REQUIRED: The resource for which the
                policy is being requested. See the
                operation documentation for the
                appropriate value for this field.

                This corresponds to the ``resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.iam.v1.policy_pb2.Policy:
                An Identity and Access Management (IAM) policy, which specifies access
                controls for Google Cloud resources.

                A Policy is a collection of bindings. A binding binds
                one or more members, or principals, to a single role.
                Principals can be user accounts, service accounts,
                Google groups, and domains (such as G Suite). A role
                is a named list of permissions; each role can be an
                IAM predefined role or a user-created custom role.

                For some types of Google Cloud resources, a binding
                can also specify a condition, which is a logical
                expression that allows access to a resource only if
                the expression evaluates to true. A condition can add
                constraints based on attributes of the request, the
                resource, or both. To learn which resources support
                conditions in their IAM policies, see the [IAM
                documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).

                **JSON example:**

                :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`

                **YAML example:**

                :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`

                For a description of IAM and its features, see the
                [IAM
                documentation](\ https://cloud.google.com/iam/docs/).

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([resource])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.GetIamPolicyRequest(**request)
        elif not request:
            request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, )

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # Idempotent read: retried by default on DEADLINE_EXCEEDED and
        # UNAVAILABLE, with exponential backoff up to a 60s deadline.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.get_iam_policy,
            default_retry=retries.AsyncRetry(
                initial=1.0,
                maximum=60.0,
                multiplier=2,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("resource", request.resource),
            )),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
    async def set_iam_policy(self,
            request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None,
            *,
            resource: Optional[str] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> policy_pb2.Policy:
        r"""Sets the access control policy on an instance
        resource. Replaces any existing policy.

        Args:
            request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]):
                The request object. Request message for ``SetIamPolicy`` method.
            resource (:class:`str`):
                REQUIRED: The resource for which the
                policy is being specified. See the
                operation documentation for the
                appropriate value for this field.

                This corresponds to the ``resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.iam.v1.policy_pb2.Policy:
                An Identity and Access Management (IAM) policy, which specifies access
                controls for Google Cloud resources.

                A Policy is a collection of bindings. A binding binds
                one or more members, or principals, to a single role.
                Principals can be user accounts, service accounts,
                Google groups, and domains (such as G Suite). A role
                is a named list of permissions; each role can be an
                IAM predefined role or a user-created custom role.

                For some types of Google Cloud resources, a binding
                can also specify a condition, which is a logical
                expression that allows access to a resource only if
                the expression evaluates to true. A condition can add
                constraints based on attributes of the request, the
                resource, or both. To learn which resources support
                conditions in their IAM policies, see the [IAM
                documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies).

                **JSON example:**

                :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \`

                **YAML example:**

                :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \`

                For a description of IAM and its features, see the
                [IAM
                documentation](\ https://cloud.google.com/iam/docs/).

        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([resource])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.SetIamPolicyRequest(**request)
        elif not request:
            request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, )

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # No default retry is configured (this call mutates the policy);
        # only a 60s default timeout applies.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.set_iam_policy,
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("resource", request.resource),
            )),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
    async def test_iam_permissions(self,
            request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None,
            *,
            resource: Optional[str] = None,
            permissions: Optional[MutableSequence[str]] = None,
            retry: OptionalRetry = gapic_v1.method.DEFAULT,
            timeout: Union[float, object] = gapic_v1.method.DEFAULT,
            metadata: Sequence[Tuple[str, str]] = (),
            ) -> iam_policy_pb2.TestIamPermissionsResponse:
        r"""Returns permissions that the caller has on the
        specified instance resource.

        Args:
            request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]):
                The request object. Request message for ``TestIamPermissions`` method.
            resource (:class:`str`):
                REQUIRED: The resource for which the
                policy detail is being requested. See
                the operation documentation for the
                appropriate value for this field.

                This corresponds to the ``resource`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            permissions (:class:`MutableSequence[str]`):
                The set of permissions to check for the ``resource``.
                Permissions with wildcards (such as '*' or 'storage.*')
                are not allowed. For more information see `IAM
                Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.

                This corresponds to the ``permissions`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.

        Returns:
            google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse:
                Response message for TestIamPermissions method.
        """
        # Create or coerce a protobuf request object.
        # Quick check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([resource, permissions])
        if request is not None and has_flattened_params:
            raise ValueError("If the `request` argument is set, then none of "
                             "the individual field arguments should be set.")

        # The request isn't a proto-plus wrapped type,
        # so it must be constructed via keyword expansion.
        if isinstance(request, dict):
            request = iam_policy_pb2.TestIamPermissionsRequest(**request)
        elif not request:
            request = iam_policy_pb2.TestIamPermissionsRequest(resource=resource, permissions=permissions, )

        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        # Idempotent read: retried by default on DEADLINE_EXCEEDED and
        # UNAVAILABLE, with exponential backoff up to a 60s deadline.
        rpc = gapic_v1.method_async.wrap_method(
            self._client._transport.test_iam_permissions,
            default_retry=retries.AsyncRetry(
                initial=1.0,
                maximum=60.0,
                multiplier=2,
                predicate=retries.if_exception_type(
                    core_exceptions.DeadlineExceeded,
                    core_exceptions.ServiceUnavailable,
                ),
                deadline=60.0,
            ),
            default_timeout=60.0,
            client_info=DEFAULT_CLIENT_INFO,
        )

        # Certain fields should be provided within the metadata header;
        # add these here.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((
                ("resource", request.resource),
            )),
        )

        # Validate the universe domain.
        self._client._validate_universe_domain()

        # Send the request.
        response = await rpc(
            request,
            retry=retry,
            timeout=timeout,
            metadata=metadata,
        )

        # Done; return the response.
        return response
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_instance_admin.ListHotTabletsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_hot_tablets, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListHotTabletsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def __aenter__(self) -> "BigtableInstanceAdminAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "BigtableInstanceAdminAsyncClient", -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py deleted file mode 100644 index da078b34f..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/client.py +++ /dev/null @@ -1,2572 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast -import warnings - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import common -from google.cloud.bigtable_admin_v2.types import instance -from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import BigtableInstanceAdminGrpcTransport -from .transports.grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport -from .transports.rest import BigtableInstanceAdminRestTransport - - -class 
BigtableInstanceAdminClientMeta(type):
-    """Metaclass for the BigtableInstanceAdmin client.
-
-    This provides class-level methods for building and retrieving
-    support objects (e.g. transport) without polluting the client instance
-    objects.
-    """
-    _transport_registry = OrderedDict()  # type: Dict[str, Type[BigtableInstanceAdminTransport]]
-    _transport_registry["grpc"] = BigtableInstanceAdminGrpcTransport
-    _transport_registry["grpc_asyncio"] = BigtableInstanceAdminGrpcAsyncIOTransport
-    _transport_registry["rest"] = BigtableInstanceAdminRestTransport
-
-    def get_transport_class(cls,
-            label: Optional[str] = None,
-        ) -> Type[BigtableInstanceAdminTransport]:
-        """Returns an appropriate transport class.
-
-        Args:
-            label: The name of the desired transport. If none is
-                provided, then the first transport in the registry is used.
-
-        Returns:
-            The transport class to use.
-        """
-        # If a specific transport is requested, return that one.
-        if label:
-            return cls._transport_registry[label]
-
-        # No transport is requested; return the default (that is, the first one
-        # in the dictionary).
-        return next(iter(cls._transport_registry.values()))
-
-
-class BigtableInstanceAdminClient(metaclass=BigtableInstanceAdminClientMeta):
-    """Service for creating, configuring, and deleting Cloud
-    Bigtable Instances and Clusters. Provides access to the Instance
-    and Cluster schemas only, not the tables' metadata or data
-    stored in those tables.
-    """
-
-    @staticmethod
-    def _get_default_mtls_endpoint(api_endpoint):
-        """Converts api endpoint to mTLS endpoint.
-
-        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
-        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
-        Args:
-            api_endpoint (Optional[str]): the api endpoint to convert.
-        Returns:
-            str: converted mTLS api endpoint.
-        """
-        if not api_endpoint:
-            return api_endpoint
-
-        mtls_endpoint_re = re.compile(
-            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. - DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" - _DEFAULT_UNIVERSE = "googleapis.com" - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableInstanceAdminClient: The constructed client. 
-        """
-        credentials = service_account.Credentials.from_service_account_file(
-            filename)
-        kwargs["credentials"] = credentials
-        return cls(*args, **kwargs)
-
-    from_service_account_json = from_service_account_file
-
-    @property
-    def transport(self) -> BigtableInstanceAdminTransport:
-        """Returns the transport used by the client instance.
-
-        Returns:
-            BigtableInstanceAdminTransport: The transport used by the client
-                instance.
-        """
-        return self._transport
-
-    @staticmethod
-    def app_profile_path(project: str,instance: str,app_profile: str,) -> str:
-        """Returns a fully-qualified app_profile string."""
-        return "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format(project=project, instance=instance, app_profile=app_profile, )
-
-    @staticmethod
-    def parse_app_profile_path(path: str) -> Dict[str,str]:
-        """Parses a app_profile path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/appProfiles/(?P<app_profile>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def cluster_path(project: str,instance: str,cluster: str,) -> str:
-        """Returns a fully-qualified cluster string."""
-        return "projects/{project}/instances/{instance}/clusters/{cluster}".format(project=project, instance=instance, cluster=cluster, )
-
-    @staticmethod
-    def parse_cluster_path(path: str) -> Dict[str,str]:
-        """Parses a cluster path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def crypto_key_path(project: str,location: str,key_ring: str,crypto_key: str,) -> str:
-        """Returns a fully-qualified crypto_key string."""
-        return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, )
-
-    @staticmethod
-    def parse_crypto_key_path(path: str) -> Dict[str,str]:
-        """Parses a crypto_key path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/keyRings/(?P<key_ring>.+?)/cryptoKeys/(?P<crypto_key>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def hot_tablet_path(project: str,instance: str,cluster: str,hot_tablet: str,) -> str:
-        """Returns a fully-qualified hot_tablet string."""
-        return "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format(project=project, instance=instance, cluster=cluster, hot_tablet=hot_tablet, )
-
-    @staticmethod
-    def parse_hot_tablet_path(path: str) -> Dict[str,str]:
-        """Parses a hot_tablet path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/clusters/(?P<cluster>.+?)/hotTablets/(?P<hot_tablet>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def instance_path(project: str,instance: str,) -> str:
-        """Returns a fully-qualified instance string."""
-        return "projects/{project}/instances/{instance}".format(project=project, instance=instance, )
-
-    @staticmethod
-    def parse_instance_path(path: str) -> Dict[str,str]:
-        """Parses a instance path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def table_path(project: str,instance: str,table: str,) -> str:
-        """Returns a fully-qualified table string."""
-        return "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, )
-
-    @staticmethod
-    def parse_table_path(path: str) -> Dict[str,str]:
-        """Parses a table path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/instances/(?P<instance>.+?)/tables/(?P<table>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_billing_account_path(billing_account: str, ) -> str:
-        """Returns a fully-qualified billing_account string."""
-        return "billingAccounts/{billing_account}".format(billing_account=billing_account, )
-
-    @staticmethod
-    def parse_common_billing_account_path(path: str) -> Dict[str,str]:
-        """Parse a billing_account path into its component segments."""
-        m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_folder_path(folder: str, ) -> str:
-        """Returns a fully-qualified folder string."""
-        return "folders/{folder}".format(folder=folder, )
-
-    @staticmethod
-    def parse_common_folder_path(path: str) -> Dict[str,str]:
-        """Parse a folder path into its component segments."""
-        m = re.match(r"^folders/(?P<folder>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_organization_path(organization: str, ) -> str:
-        """Returns a fully-qualified organization string."""
-        return "organizations/{organization}".format(organization=organization, )
-
-    @staticmethod
-    def parse_common_organization_path(path: str) -> Dict[str,str]:
-        """Parse a organization path into its component segments."""
-        m = re.match(r"^organizations/(?P<organization>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_project_path(project: str, ) -> str:
-        """Returns a fully-qualified project string."""
-        return "projects/{project}".format(project=project, )
-
-    @staticmethod
-    def parse_common_project_path(path: str) -> Dict[str,str]:
-        """Parse a project path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @staticmethod
-    def common_location_path(project: str, location: str, ) -> str:
-        """Returns a fully-qualified location string."""
-        return "projects/{project}/locations/{location}".format(project=project, location=location, )
-
-    @staticmethod
-    def parse_common_location_path(path: str) -> Dict[str,str]:
-        """Parse a location path into its component segments."""
-        m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
-        return m.groupdict() if m else {}
-
-    @classmethod
-    def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None):
-        """Deprecated. Return the API endpoint and client cert source for mutual TLS.
-
-        The client cert source is determined in the following order:
-        (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the
-        client cert source is None.
-        (2) if `client_options.client_cert_source` is provided, use the provided one; if the
-        default client cert source exists, use the default one; otherwise the client cert
-        source is None.
-
-        The API endpoint is determined in the following order:
-        (1) if `client_options.api_endpoint` if provided, use the provided one.
-        (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the
-        default mTLS endpoint; if the environment variable is "never", use the default API
-        endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise
-        use the default API endpoint.
-
-        More details can be found at https://google.aip.dev/auth/4114.
-
-        Args:
-            client_options (google.api_core.client_options.ClientOptions): Custom options for the
-                client. Only the `api_endpoint` and `client_cert_source` properties may be used
-                in this method.
-
-        Returns:
-            Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the
-                client cert source to use.
-
-        Raises:
-            google.auth.exceptions.MutualTLSChannelError: If any errors happen.
-        """
-
-        warnings.warn("get_mtls_endpoint_and_cert_source is deprecated. 
Use the api_endpoint property instead.", - DeprecationWarning) - if client_options is None: - client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - - # Figure out the client cert source to use. - client_cert_source = None - if use_client_cert == "true": - if client_options.client_cert_source: - client_cert_source = client_options.client_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - api_endpoint = cls.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = cls.DEFAULT_ENDPOINT - - return api_endpoint, client_cert_source - - @staticmethod - def _read_environment_variables(): - """Returns the environment variables used by the client. - - Returns: - Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, - GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. - - Raises: - ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not - any of ["true", "false"]. - google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT - is not any of ["auto", "never", "always"]. 
- """ - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false").lower() - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() - universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env - - def _get_client_cert_source(provided_cert_source, use_cert_flag): - """Return the client cert source to be used by the client. - - Args: - provided_cert_source (bytes): The client certificate source provided. - use_cert_flag (bool): A flag indicating whether to use the client certificate. - - Returns: - bytes or None: The client cert source to be used by the client. - """ - client_cert_source = None - if use_cert_flag: - if provided_cert_source: - client_cert_source = provided_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - return client_cert_source - - def _get_api_endpoint(api_override, client_cert_source, universe_domain, use_mtls_endpoint): - """Return the API endpoint used by the client. - - Args: - api_override (str): The API endpoint override. If specified, this is always - the return value of this function and the other arguments are not used. - client_cert_source (bytes): The client certificate source used by the client. - universe_domain (str): The universe domain used by the client. - use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. - Possible values are "always", "auto", or "never". - - Returns: - str: The API endpoint to be used by the client. 
- """ - if api_override is not None: - api_endpoint = api_override - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - _default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - if universe_domain != _default_universe: - raise MutualTLSChannelError(f"mTLS is not supported in any universe other than {_default_universe}.") - api_endpoint = BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=universe_domain) - return api_endpoint - - @staticmethod - def _get_universe_domain(client_universe_domain: Optional[str], universe_domain_env: Optional[str]) -> str: - """Return the universe domain used by the client. - - Args: - client_universe_domain (Optional[str]): The universe domain configured via the client options. - universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. - - Returns: - str: The universe domain to be used by the client. - - Raises: - ValueError: If the universe domain is an empty string. - """ - universe_domain = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - if client_universe_domain is not None: - universe_domain = client_universe_domain - elif universe_domain_env is not None: - universe_domain = universe_domain_env - if len(universe_domain.strip()) == 0: - raise ValueError("Universe Domain cannot be an empty string.") - return universe_domain - - @staticmethod - def _compare_universes(client_universe: str, - credentials: ga_credentials.Credentials) -> bool: - """Returns True iff the universe domains used by the client and credentials match. - - Args: - client_universe (str): The universe domain configured via the client options. - credentials (ga_credentials.Credentials): The credentials being used in the client. - - Returns: - bool: True iff client_universe matches the universe in credentials. 
- - Raises: - ValueError: when client_universe does not match the universe in credentials. - """ - if credentials: - credentials_universe = credentials.universe_domain - if client_universe != credentials_universe: - default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - raise ValueError("The configured universe domain " - f"({client_universe}) does not match the universe domain " - f"found in the credentials ({credentials_universe}). " - "If you haven't configured the universe domain explicitly, " - f"`{default_universe}` is the default.") - return True - - def _validate_universe_domain(self): - """Validates client's and credentials' universe domains are consistent. - - Returns: - bool: True iff the configured universe domain is valid. - - Raises: - ValueError: If the configured universe domain is not valid. - """ - self._is_universe_domain_valid = (self._is_universe_domain_valid or - BigtableInstanceAdminClient._compare_universes(self.universe_domain, self.transport._credentials)) - return self._is_universe_domain_valid - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. - - Returns: - str: The universe domain used by the client instance. - """ - return self._universe_domain - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, BigtableInstanceAdminTransport]] = None, - client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the bigtable instance admin client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, BigtableInstanceAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that the ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client_options = client_options - if isinstance(self._client_options, dict): - self._client_options = client_options_lib.from_dict(self._client_options) - if self._client_options is None: - self._client_options = client_options_lib.ClientOptions() - self._client_options = cast(client_options_lib.ClientOptions, self._client_options) - - universe_domain_opt = getattr(self._client_options, 'universe_domain', None) - - self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = BigtableInstanceAdminClient._read_environment_variables() - self._client_cert_source = BigtableInstanceAdminClient._get_client_cert_source(self._client_options.client_cert_source, self._use_client_cert) - self._universe_domain = BigtableInstanceAdminClient._get_universe_domain(universe_domain_opt, self._universe_domain_env) - self._api_endpoint = None # updated below, depending on `transport` - - # Initialize the universe domain validation. - self._is_universe_domain_valid = False - - api_key_value = getattr(self._client_options, "api_key", None) - if api_key_value and credentials: - raise ValueError("client_options.api_key and credentials are mutually exclusive") - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - transport_provided = isinstance(transport, BigtableInstanceAdminTransport) - if transport_provided: - # transport is a BigtableInstanceAdminTransport instance. - if credentials or self._client_options.credentials_file or api_key_value: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if self._client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = cast(BigtableInstanceAdminTransport, transport) - self._api_endpoint = self._transport.host - - self._api_endpoint = (self._api_endpoint or - BigtableInstanceAdminClient._get_api_endpoint( - self._client_options.api_endpoint, - self._client_cert_source, - self._universe_domain, - self._use_mtls_endpoint)) - - if not transport_provided: - import google.auth._default # type: ignore - - if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): - credentials = google.auth._default.get_api_key_credentials(api_key_value) - - Transport = type(self).get_transport_class(cast(str, transport)) - self._transport = Transport( - credentials=credentials, - credentials_file=self._client_options.credentials_file, - host=self._api_endpoint, - scopes=self._client_options.scopes, - client_cert_source_for_mtls=self._client_cert_source, - quota_project_id=self._client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - api_audience=self._client_options.api_audience, - ) - - def create_instance(self, - request: Optional[Union[bigtable_instance_admin.CreateInstanceRequest, dict]] = None, - *, - parent: Optional[str] = None, - instance_id: Optional[str] = None, - instance: Optional[gba_instance.Instance] = None, - clusters: Optional[MutableMapping[str, gba_instance.Cluster]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Create an instance within a project. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateInstanceRequest, dict]): - The request object. 
Request message for - BigtableInstanceAdmin.CreateInstance. - parent (str): - Required. The unique name of the project in which to - create the new instance. Values are of the form - ``projects/{project}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instance_id (str): - Required. The ID to be used when referring to the new - instance within its project, e.g., just ``myinstance`` - rather than ``projects/myproject/instances/myinstance``. - - This corresponds to the ``instance_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - instance (google.cloud.bigtable_admin_v2.types.Instance): - Required. The instance to create. Fields marked - ``OutputOnly`` must be left blank. - - This corresponds to the ``instance`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - clusters (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): - Required. The clusters to be created within the - instance, mapped by desired cluster ID, e.g., just - ``mycluster`` rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. - Currently, at most four clusters can be specified. - - This corresponds to the ``clusters`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, instance_id, instance, clusters]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.CreateInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.CreateInstanceRequest): - request = bigtable_instance_admin.CreateInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if instance_id is not None: - request.instance_id = instance_id - if instance is not None: - request.instance = instance - if clusters is not None: - request.clusters = clusters - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - gba_instance.Instance, - metadata_type=bigtable_instance_admin.CreateInstanceMetadata, - ) - - # Done; return the response. - return response - - def get_instance(self, - request: Optional[Union[bigtable_instance_admin.GetInstanceRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.Instance: - r"""Gets information about an instance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetInstanceRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.GetInstance. - name (str): - Required. The unique name of the requested instance. - Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Instance: - A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.GetInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.GetInstanceRequest): - request = bigtable_instance_admin.GetInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_instances(self, - request: Optional[Union[bigtable_instance_admin.ListInstancesRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_instance_admin.ListInstancesResponse: - r"""Lists information about instances in a project. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListInstancesRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListInstances. - parent (str): - Required. 
The unique name of the project for which a - list of instances is requested. Values are of the form - ``projects/{project}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.ListInstancesResponse: - Response message for - BigtableInstanceAdmin.ListInstances. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListInstancesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.ListInstancesRequest): - request = bigtable_instance_admin.ListInstancesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_instances] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. 
- self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_instance(self, - request: Optional[Union[instance.Instance, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.Instance: - r"""Updates an instance within a project. This method - updates only the display name and type for an Instance. - To update other Instance properties, such as labels, use - PartialUpdateInstance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.Instance, dict]): - The request object. A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Instance: - A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a instance.Instance. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, instance.Instance): - request = instance.Instance(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def partial_update_instance(self, - request: Optional[Union[bigtable_instance_admin.PartialUpdateInstanceRequest, dict]] = None, - *, - instance: Optional[gba_instance.Instance] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Partially updates an instance within a project. This - method can modify all fields of an Instance and is the - preferred way to update an Instance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - instance (google.cloud.bigtable_admin_v2.types.Instance): - Required. The Instance which will - (partially) replace the current value. - - This corresponds to the ``instance`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of Instance - fields which should be replaced. Must be - explicitly set. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Instance` A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] and - the resources that serve them. All tables in an - instance are served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([instance, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.PartialUpdateInstanceRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.PartialUpdateInstanceRequest): - request = bigtable_instance_admin.PartialUpdateInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if instance is not None: - request.instance = instance - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.partial_update_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("instance.name", request.instance.name), - )), - ) - - # Validate the universe domain. 
- self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - gba_instance.Instance, - metadata_type=bigtable_instance_admin.UpdateInstanceMetadata, - ) - - # Done; return the response. - return response - - def delete_instance(self, - request: Optional[Union[bigtable_instance_admin.DeleteInstanceRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Delete an instance from a project. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteInstanceRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.DeleteInstance. - name (str): - Required. The unique name of the instance to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.DeleteInstanceRequest. 
- # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.DeleteInstanceRequest): - request = bigtable_instance_admin.DeleteInstanceRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_instance] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_cluster(self, - request: Optional[Union[bigtable_instance_admin.CreateClusterRequest, dict]] = None, - *, - parent: Optional[str] = None, - cluster_id: Optional[str] = None, - cluster: Optional[instance.Cluster] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Creates a cluster within an instance. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateClusterRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.CreateCluster. - parent (str): - Required. The unique name of the instance in which to - create the new cluster. 
Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster_id (str): - Required. The ID to be used when referring to the new - cluster within its instance, e.g., just ``mycluster`` - rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - - This corresponds to the ``cluster_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (google.cloud.bigtable_admin_v2.types.Cluster): - Required. The cluster to be created. Fields marked - ``OutputOnly`` must be left blank. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, cluster_id, cluster]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.CreateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.CreateClusterRequest): - request = bigtable_instance_admin.CreateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if cluster_id is not None: - request.cluster_id = cluster_id - if cluster is not None: - request.cluster = cluster - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.CreateClusterMetadata, - ) - - # Done; return the response. 
- return response - - def get_cluster(self, - request: Optional[Union[bigtable_instance_admin.GetClusterRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.Cluster: - r"""Gets information about a cluster. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetClusterRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.GetCluster. - name (str): - Required. The unique name of the requested cluster. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Cluster: - A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.GetClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, bigtable_instance_admin.GetClusterRequest): - request = bigtable_instance_admin.GetClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_clusters(self, - request: Optional[Union[bigtable_instance_admin.ListClustersRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_instance_admin.ListClustersResponse: - r"""Lists information about clusters in an instance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListClustersRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListClusters. - parent (str): - Required. The unique name of the instance for which a - list of clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list Clusters for all Instances - in a project, e.g., ``projects/myproject/instances/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.ListClustersResponse: - Response message for - BigtableInstanceAdmin.ListClusters. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListClustersRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.ListClustersRequest): - request = bigtable_instance_admin.ListClustersRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_clusters] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_cluster(self, - request: Optional[Union[instance.Cluster, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Updates a cluster within an instance. - - Note that UpdateCluster does not support updating - cluster_config.cluster_autoscaling_config. In order to update - it, you must use PartialUpdateCluster. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.Cluster, dict]): - The request object. A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a instance.Cluster. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, instance.Cluster): - request = instance.Cluster(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.update_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.UpdateClusterMetadata, - ) - - # Done; return the response. - return response - - def partial_update_cluster(self, - request: Optional[Union[bigtable_instance_admin.PartialUpdateClusterRequest, dict]] = None, - *, - cluster: Optional[instance.Cluster] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Partially updates a cluster within a project. This method is the - preferred way to update a Cluster. - - To enable and update autoscaling, set - cluster_config.cluster_autoscaling_config. When autoscaling is - enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning - that updates to it are ignored. Note that an update cannot - simultaneously set serve_nodes to non-zero and - cluster_config.cluster_autoscaling_config to non-empty, and also - specify both in the update_mask. - - To disable autoscaling, clear - cluster_config.cluster_autoscaling_config, and explicitly set a - serve_node count via the update_mask. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateCluster. 
- cluster (google.cloud.bigtable_admin_v2.types.Cluster): - Required. The Cluster which contains the partial updates - to be applied, subject to the update_mask. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of Cluster - fields which should be replaced. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Cluster` A resizable group of nodes in a particular cloud location, capable - of serving all - [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([cluster, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.PartialUpdateClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, bigtable_instance_admin.PartialUpdateClusterRequest): - request = bigtable_instance_admin.PartialUpdateClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if cluster is not None: - request.cluster = cluster - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.partial_update_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("cluster.name", request.cluster.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - instance.Cluster, - metadata_type=bigtable_instance_admin.PartialUpdateClusterMetadata, - ) - - # Done; return the response. - return response - - def delete_cluster(self, - request: Optional[Union[bigtable_instance_admin.DeleteClusterRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a cluster from an instance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteClusterRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.DeleteCluster. - name (str): - Required. The unique name of the cluster to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
- - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.DeleteClusterRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.DeleteClusterRequest): - request = bigtable_instance_admin.DeleteClusterRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_cluster] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. 
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_app_profile(self, - request: Optional[Union[bigtable_instance_admin.CreateAppProfileRequest, dict]] = None, - *, - parent: Optional[str] = None, - app_profile_id: Optional[str] = None, - app_profile: Optional[instance.AppProfile] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.AppProfile: - r"""Creates an app profile within an instance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateAppProfileRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.CreateAppProfile. - parent (str): - Required. The unique name of the instance in which to - create the new app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile_id (str): - Required. The ID to be used when referring to the new - app profile within its instance, e.g., just - ``myprofile`` rather than - ``projects/myproject/instances/myinstance/appProfiles/myprofile``. - - This corresponds to the ``app_profile_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): - Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - - This corresponds to the ``app_profile`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.bigtable_admin_v2.types.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, app_profile_id, app_profile]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.CreateAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.CreateAppProfileRequest): - request = bigtable_instance_admin.CreateAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if app_profile_id is not None: - request.app_profile_id = app_profile_id - if app_profile is not None: - request.app_profile = app_profile - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_app_profile] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def get_app_profile(self, - request: Optional[Union[bigtable_instance_admin.GetAppProfileRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> instance.AppProfile: - r"""Gets information about an app profile. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetAppProfileRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.GetAppProfile. - name (str): - Required. The unique name of the requested app profile. - Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.GetAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, bigtable_instance_admin.GetAppProfileRequest): - request = bigtable_instance_admin.GetAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_app_profile] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_app_profiles(self, - request: Optional[Union[bigtable_instance_admin.ListAppProfilesRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListAppProfilesPager: - r"""Lists information about app profiles in an instance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListAppProfiles. - parent (str): - Required. The unique name of the instance for which a - list of app profiles is requested. Values are of the - form ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list AppProfiles for all - Instances in a project, e.g., - ``projects/myproject/instances/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListAppProfilesPager: - Response message for - BigtableInstanceAdmin.ListAppProfiles. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListAppProfilesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.ListAppProfilesRequest): - request = bigtable_instance_admin.ListAppProfilesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_app_profiles] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. 
- response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListAppProfilesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_app_profile(self, - request: Optional[Union[bigtable_instance_admin.UpdateAppProfileRequest, dict]] = None, - *, - app_profile: Optional[instance.AppProfile] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Updates an app profile within an instance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateAppProfileRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.UpdateAppProfile. - app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): - Required. The app profile which will - (partially) replace the current value. - - This corresponds to the ``app_profile`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of app profile - fields which should be replaced. If - unset, all fields will be replaced. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.AppProfile` A configuration object describing how Cloud Bigtable should treat traffic - from a particular end user application. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([app_profile, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.UpdateAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.UpdateAppProfileRequest): - request = bigtable_instance_admin.UpdateAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if app_profile is not None: - request.app_profile = app_profile - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_app_profile] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("app_profile.name", request.app_profile.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation.from_gapic( - response, - self._transport.operations_client, - instance.AppProfile, - metadata_type=bigtable_instance_admin.UpdateAppProfileMetadata, - ) - - # Done; return the response. - return response - - def delete_app_profile(self, - request: Optional[Union[bigtable_instance_admin.DeleteAppProfileRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes an app profile from an instance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteAppProfileRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.DeleteAppProfile. - name (str): - Required. The unique name of the app profile to be - deleted. Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.DeleteAppProfileRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, bigtable_instance_admin.DeleteAppProfileRequest): - request = bigtable_instance_admin.DeleteAppProfileRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_app_profile] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def get_iam_policy(self, - request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Gets the access control policy for an instance - resource. Returns an empty policy if an instance exists - but does not have a policy set. - - Args: - request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for ``GetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
- - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.GetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. 
- request = iam_policy_pb2.GetIamPolicyRequest() - if resource is not None: - request.resource = resource - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("resource", request.resource), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_iam_policy(self, - request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Sets the access control policy on an instance - resource. Replaces any existing policy. - - Args: - request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for ``SetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: 
BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.SetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.SetIamPolicyRequest() - if resource is not None: - request.resource = resource - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("resource", request.resource), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def test_iam_permissions(self, - request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, - *, - resource: Optional[str] = None, - permissions: Optional[MutableSequence[str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Returns permissions that the caller has on the - specified instance resource. - - Args: - request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for ``TestIamPermissions`` method. - resource (str): - REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - permissions (MutableSequence[str]): - The set of permissions to check for the ``resource``. - Permissions with wildcards (such as '*' or 'storage.*') - are not allowed. For more information see `IAM - Overview `__. - - This corresponds to the ``permissions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: - Response message for TestIamPermissions method. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([resource, permissions]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.TestIamPermissionsRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.TestIamPermissionsRequest() - if resource is not None: - request.resource = resource - if permissions: - request.permissions.extend(permissions) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("resource", request.resource), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def list_hot_tablets(self, - request: Optional[Union[bigtable_instance_admin.ListHotTabletsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListHotTabletsPager: - r"""Lists hot tablets in a cluster, within the time range - provided. Hot tablets are ordered based on CPU usage. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest, dict]): - The request object. Request message for - BigtableInstanceAdmin.ListHotTablets. - parent (str): - Required. The cluster name to list hot tablets. 
Value is - in the following form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.pagers.ListHotTabletsPager: - Response message for - BigtableInstanceAdmin.ListHotTablets. - Iterating over this object will yield - results and resolve additional pages - automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_instance_admin.ListHotTabletsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_instance_admin.ListHotTabletsRequest): - request = bigtable_instance_admin.ListHotTabletsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_hot_tablets] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListHotTabletsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def __enter__(self) -> "BigtableInstanceAdminClient": - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - - - - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "BigtableInstanceAdminClient", -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py deleted file mode 100644 index d480cc430..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/pagers.py +++ /dev/null @@ -1,261 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance - - -class ListAppProfilesPager: - """A pager for iterating through ``list_app_profiles`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``app_profiles`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListAppProfiles`` requests and continue to iterate - through the ``app_profiles`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., bigtable_instance_admin.ListAppProfilesResponse], - request: bigtable_instance_admin.ListAppProfilesRequest, - response: bigtable_instance_admin.ListAppProfilesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): - The initial request object. 
- response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = bigtable_instance_admin.ListAppProfilesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_instance_admin.ListAppProfilesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[instance.AppProfile]: - for page in self.pages: - yield from page.app_profiles - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListAppProfilesAsyncPager: - """A pager for iterating through ``list_app_profiles`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``app_profiles`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListAppProfiles`` requests and continue to iterate - through the ``app_profiles`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., Awaitable[bigtable_instance_admin.ListAppProfilesResponse]], - request: bigtable_instance_admin.ListAppProfilesRequest, - response: bigtable_instance_admin.ListAppProfilesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListAppProfilesRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListAppProfilesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = bigtable_instance_admin.ListAppProfilesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[bigtable_instance_admin.ListAppProfilesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[instance.AppProfile]: - async def async_generator(): - async for page in self.pages: - for response in page.app_profiles: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHotTabletsPager: - """A pager for iterating through ``list_hot_tablets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``hot_tablets`` field. 
- - If there are more pages, the ``__iter__`` method will make additional - ``ListHotTablets`` requests and continue to iterate - through the ``hot_tablets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., bigtable_instance_admin.ListHotTabletsResponse], - request: bigtable_instance_admin.ListHotTabletsRequest, - response: bigtable_instance_admin.ListHotTabletsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = bigtable_instance_admin.ListHotTabletsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_instance_admin.ListHotTabletsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[instance.HotTablet]: - for page in self.pages: - yield from page.hot_tablets - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListHotTabletsAsyncPager: - """A pager for iterating through ``list_hot_tablets`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``hot_tablets`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListHotTablets`` requests and continue to iterate - through the ``hot_tablets`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[bigtable_instance_admin.ListHotTabletsResponse]], - request: bigtable_instance_admin.ListHotTabletsRequest, - response: bigtable_instance_admin.ListHotTabletsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. 
- request (google.cloud.bigtable_admin_v2.types.ListHotTabletsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListHotTabletsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = bigtable_instance_admin.ListHotTabletsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[bigtable_instance_admin.ListHotTabletsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[instance.HotTablet]: - async def async_generator(): - async for page in self.pages: - for response in page.hot_tablets: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py deleted file mode 100644 index bfba48436..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import BigtableInstanceAdminTransport -from .grpc import BigtableInstanceAdminGrpcTransport -from .grpc_asyncio import BigtableInstanceAdminGrpcAsyncIOTransport -from .rest import BigtableInstanceAdminRestTransport -from .rest import BigtableInstanceAdminRestInterceptor - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[BigtableInstanceAdminTransport]] -_transport_registry['grpc'] = BigtableInstanceAdminGrpcTransport -_transport_registry['grpc_asyncio'] = BigtableInstanceAdminGrpcAsyncIOTransport -_transport_registry['rest'] = BigtableInstanceAdminRestTransport - -__all__ = ( - 'BigtableInstanceAdminTransport', - 'BigtableInstanceAdminGrpcTransport', - 'BigtableInstanceAdminGrpcAsyncIOTransport', - 'BigtableInstanceAdminRestTransport', - 'BigtableInstanceAdminRestInterceptor', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py deleted file mode 100644 index 2d215ef83..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/base.py +++ /dev/null @@ -1,540 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -class BigtableInstanceAdminTransport(abc.ABC): - """Abstract transport class for BigtableInstanceAdmin.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.cluster', - 'https://www.googleapis.com/auth/bigtable.admin.instance', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', - ) - - DEFAULT_HOST: str = 
'bigtableadmin.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - # Don't apply audience if the credentials file passed from user. - if hasattr(credentials, "with_gdch_audience"): - credentials = credentials.with_gdch_audience(api_audience if api_audience else host) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - @property - def host(self): - return self._host - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_instance: gapic_v1.method.wrap_method( - self.create_instance, - default_timeout=300.0, - client_info=client_info, - ), - self.get_instance: gapic_v1.method.wrap_method( - self.get_instance, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_instances: gapic_v1.method.wrap_method( - self.list_instances, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_instance: gapic_v1.method.wrap_method( - self.update_instance, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.partial_update_instance: gapic_v1.method.wrap_method( - self.partial_update_instance, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.delete_instance: gapic_v1.method.wrap_method( - self.delete_instance, - default_timeout=60.0, - client_info=client_info, - ), - self.create_cluster: gapic_v1.method.wrap_method( - self.create_cluster, - default_timeout=60.0, - client_info=client_info, - ), - self.get_cluster: gapic_v1.method.wrap_method( - self.get_cluster, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - 
core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_clusters: gapic_v1.method.wrap_method( - self.list_clusters, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_cluster: gapic_v1.method.wrap_method( - self.update_cluster, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.partial_update_cluster: gapic_v1.method.wrap_method( - self.partial_update_cluster, - default_timeout=None, - client_info=client_info, - ), - self.delete_cluster: gapic_v1.method.wrap_method( - self.delete_cluster, - default_timeout=60.0, - client_info=client_info, - ), - self.create_app_profile: gapic_v1.method.wrap_method( - self.create_app_profile, - default_timeout=60.0, - client_info=client_info, - ), - self.get_app_profile: gapic_v1.method.wrap_method( - self.get_app_profile, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_app_profiles: gapic_v1.method.wrap_method( - self.list_app_profiles, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_app_profile: gapic_v1.method.wrap_method( - 
self.update_app_profile, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.delete_app_profile: gapic_v1.method.wrap_method( - self.delete_app_profile, - default_timeout=60.0, - client_info=client_info, - ), - self.get_iam_policy: gapic_v1.method.wrap_method( - self.get_iam_policy, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.set_iam_policy: gapic_v1.method.wrap_method( - self.set_iam_policy, - default_timeout=60.0, - client_info=client_info, - ), - self.test_iam_permissions: gapic_v1.method.wrap_method( - self.test_iam_permissions, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_hot_tablets: gapic_v1.method.wrap_method( - self.list_hot_tablets, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_instance(self) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_instance(self) -> Callable[ - [bigtable_instance_admin.GetInstanceRequest], - Union[ - instance.Instance, - Awaitable[instance.Instance] - ]]: - raise NotImplementedError() - - @property - def list_instances(self) -> Callable[ - [bigtable_instance_admin.ListInstancesRequest], - Union[ - bigtable_instance_admin.ListInstancesResponse, - Awaitable[bigtable_instance_admin.ListInstancesResponse] - ]]: - raise NotImplementedError() - - @property - def update_instance(self) -> Callable[ - [instance.Instance], - Union[ - instance.Instance, - Awaitable[instance.Instance] - ]]: - raise NotImplementedError() - - @property - def partial_update_instance(self) -> Callable[ - [bigtable_instance_admin.PartialUpdateInstanceRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_instance(self) -> Callable[ - [bigtable_instance_admin.DeleteInstanceRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_cluster(self) -> Callable[ - [bigtable_instance_admin.CreateClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_cluster(self) -> Callable[ - [bigtable_instance_admin.GetClusterRequest], - Union[ - instance.Cluster, - Awaitable[instance.Cluster] - ]]: - raise NotImplementedError() - - @property - def list_clusters(self) -> Callable[ - [bigtable_instance_admin.ListClustersRequest], - Union[ - 
bigtable_instance_admin.ListClustersResponse, - Awaitable[bigtable_instance_admin.ListClustersResponse] - ]]: - raise NotImplementedError() - - @property - def update_cluster(self) -> Callable[ - [instance.Cluster], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def partial_update_cluster(self) -> Callable[ - [bigtable_instance_admin.PartialUpdateClusterRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_cluster(self) -> Callable[ - [bigtable_instance_admin.DeleteClusterRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_app_profile(self) -> Callable[ - [bigtable_instance_admin.CreateAppProfileRequest], - Union[ - instance.AppProfile, - Awaitable[instance.AppProfile] - ]]: - raise NotImplementedError() - - @property - def get_app_profile(self) -> Callable[ - [bigtable_instance_admin.GetAppProfileRequest], - Union[ - instance.AppProfile, - Awaitable[instance.AppProfile] - ]]: - raise NotImplementedError() - - @property - def list_app_profiles(self) -> Callable[ - [bigtable_instance_admin.ListAppProfilesRequest], - Union[ - bigtable_instance_admin.ListAppProfilesResponse, - Awaitable[bigtable_instance_admin.ListAppProfilesResponse] - ]]: - raise NotImplementedError() - - @property - def update_app_profile(self) -> Callable[ - [bigtable_instance_admin.UpdateAppProfileRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_app_profile(self) -> Callable[ - [bigtable_instance_admin.DeleteAppProfileRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def get_iam_policy(self) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - Union[ - policy_pb2.Policy, - 
Awaitable[policy_pb2.Policy] - ]]: - raise NotImplementedError() - - @property - def set_iam_policy(self) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - Union[ - policy_pb2.Policy, - Awaitable[policy_pb2.Policy] - ]]: - raise NotImplementedError() - - @property - def test_iam_permissions(self) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - Union[ - iam_policy_pb2.TestIamPermissionsResponse, - Awaitable[iam_policy_pb2.TestIamPermissionsResponse] - ]]: - raise NotImplementedError() - - @property - def list_hot_tablets(self) -> Callable[ - [bigtable_instance_admin.ListHotTabletsRequest], - Union[ - bigtable_instance_admin.ListHotTabletsResponse, - Awaitable[bigtable_instance_admin.ListHotTabletsResponse] - ]]: - raise NotImplementedError() - - @property - def kind(self) -> str: - raise NotImplementedError() - - -__all__ = ( - 'BigtableInstanceAdminTransport', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py deleted file mode 100644 index d27c504db..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc.py +++ /dev/null @@ -1,849 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO - - -class BigtableInstanceAdminGrpcTransport(BigtableInstanceAdminTransport): - """gRPC backend transport for BigtableInstanceAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable Instances and Clusters. Provides access to the Instance - and Cluster schemas only, not the tables' metadata or data - stored in those tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. 
A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. 
- self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. - credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. 
This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. - """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. 
- """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_instance(self) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], - operations_pb2.Operation]: - r"""Return a callable for the create instance method over gRPC. - - Create an instance within a project. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Returns: - Callable[[~.CreateInstanceRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_instance' not in self._stubs: - self._stubs['create_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance', - request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_instance'] - - @property - def get_instance(self) -> Callable[ - [bigtable_instance_admin.GetInstanceRequest], - instance.Instance]: - r"""Return a callable for the get instance method over gRPC. - - Gets information about an instance. 
- - Returns: - Callable[[~.GetInstanceRequest], - ~.Instance]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_instance' not in self._stubs: - self._stubs['get_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance', - request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, - response_deserializer=instance.Instance.deserialize, - ) - return self._stubs['get_instance'] - - @property - def list_instances(self) -> Callable[ - [bigtable_instance_admin.ListInstancesRequest], - bigtable_instance_admin.ListInstancesResponse]: - r"""Return a callable for the list instances method over gRPC. - - Lists information about instances in a project. - - Returns: - Callable[[~.ListInstancesRequest], - ~.ListInstancesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_instances' not in self._stubs: - self._stubs['list_instances'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', - request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, - response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, - ) - return self._stubs['list_instances'] - - @property - def update_instance(self) -> Callable[ - [instance.Instance], - instance.Instance]: - r"""Return a callable for the update instance method over gRPC. - - Updates an instance within a project. This method - updates only the display name and type for an Instance. 
- To update other Instance properties, such as labels, use - PartialUpdateInstance. - - Returns: - Callable[[~.Instance], - ~.Instance]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_instance' not in self._stubs: - self._stubs['update_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', - request_serializer=instance.Instance.serialize, - response_deserializer=instance.Instance.deserialize, - ) - return self._stubs['update_instance'] - - @property - def partial_update_instance(self) -> Callable[ - [bigtable_instance_admin.PartialUpdateInstanceRequest], - operations_pb2.Operation]: - r"""Return a callable for the partial update instance method over gRPC. - - Partially updates an instance within a project. This - method can modify all fields of an Instance and is the - preferred way to update an Instance. - - Returns: - Callable[[~.PartialUpdateInstanceRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'partial_update_instance' not in self._stubs: - self._stubs['partial_update_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance', - request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['partial_update_instance'] - - @property - def delete_instance(self) -> Callable[ - [bigtable_instance_admin.DeleteInstanceRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete instance method over gRPC. - - Delete an instance from a project. - - Returns: - Callable[[~.DeleteInstanceRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_instance' not in self._stubs: - self._stubs['delete_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', - request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_instance'] - - @property - def create_cluster(self) -> Callable[ - [bigtable_instance_admin.CreateClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster within an instance. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Returns: - Callable[[~.CreateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', - request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_cluster'] - - @property - def get_cluster(self) -> Callable[ - [bigtable_instance_admin.GetClusterRequest], - instance.Cluster]: - r"""Return a callable for the get cluster method over gRPC. - - Gets information about a cluster. - - Returns: - Callable[[~.GetClusterRequest], - ~.Cluster]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', - request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, - response_deserializer=instance.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def list_clusters(self) -> Callable[ - [bigtable_instance_admin.ListClustersRequest], - bigtable_instance_admin.ListClustersResponse]: - r"""Return a callable for the list clusters method over gRPC. - - Lists information about clusters in an instance. - - Returns: - Callable[[~.ListClustersRequest], - ~.ListClustersResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', - request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, - response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def update_cluster(self) -> Callable[ - [instance.Cluster], - operations_pb2.Operation]: - r"""Return a callable for the update cluster method over gRPC. - - Updates a cluster within an instance. - - Note that UpdateCluster does not support updating - cluster_config.cluster_autoscaling_config. In order to update - it, you must use PartialUpdateCluster. - - Returns: - Callable[[~.Cluster], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', - request_serializer=instance.Cluster.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_cluster'] - - @property - def partial_update_cluster(self) -> Callable[ - [bigtable_instance_admin.PartialUpdateClusterRequest], - operations_pb2.Operation]: - r"""Return a callable for the partial update cluster method over gRPC. - - Partially updates a cluster within a project. This method is the - preferred way to update a Cluster. - - To enable and update autoscaling, set - cluster_config.cluster_autoscaling_config. 
When autoscaling is - enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning - that updates to it are ignored. Note that an update cannot - simultaneously set serve_nodes to non-zero and - cluster_config.cluster_autoscaling_config to non-empty, and also - specify both in the update_mask. - - To disable autoscaling, clear - cluster_config.cluster_autoscaling_config, and explicitly set a - serve_node count via the update_mask. - - Returns: - Callable[[~.PartialUpdateClusterRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'partial_update_cluster' not in self._stubs: - self._stubs['partial_update_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster', - request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['partial_update_cluster'] - - @property - def delete_cluster(self) -> Callable[ - [bigtable_instance_admin.DeleteClusterRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes a cluster from an instance. - - Returns: - Callable[[~.DeleteClusterRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_cluster' not in self._stubs: - self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', - request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_cluster'] - - @property - def create_app_profile(self) -> Callable[ - [bigtable_instance_admin.CreateAppProfileRequest], - instance.AppProfile]: - r"""Return a callable for the create app profile method over gRPC. - - Creates an app profile within an instance. - - Returns: - Callable[[~.CreateAppProfileRequest], - ~.AppProfile]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_app_profile' not in self._stubs: - self._stubs['create_app_profile'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile', - request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, - response_deserializer=instance.AppProfile.deserialize, - ) - return self._stubs['create_app_profile'] - - @property - def get_app_profile(self) -> Callable[ - [bigtable_instance_admin.GetAppProfileRequest], - instance.AppProfile]: - r"""Return a callable for the get app profile method over gRPC. - - Gets information about an app profile. - - Returns: - Callable[[~.GetAppProfileRequest], - ~.AppProfile]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_app_profile' not in self._stubs: - self._stubs['get_app_profile'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile', - request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, - response_deserializer=instance.AppProfile.deserialize, - ) - return self._stubs['get_app_profile'] - - @property - def list_app_profiles(self) -> Callable[ - [bigtable_instance_admin.ListAppProfilesRequest], - bigtable_instance_admin.ListAppProfilesResponse]: - r"""Return a callable for the list app profiles method over gRPC. - - Lists information about app profiles in an instance. - - Returns: - Callable[[~.ListAppProfilesRequest], - ~.ListAppProfilesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_app_profiles' not in self._stubs: - self._stubs['list_app_profiles'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles', - request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, - response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, - ) - return self._stubs['list_app_profiles'] - - @property - def update_app_profile(self) -> Callable[ - [bigtable_instance_admin.UpdateAppProfileRequest], - operations_pb2.Operation]: - r"""Return a callable for the update app profile method over gRPC. - - Updates an app profile within an instance. - - Returns: - Callable[[~.UpdateAppProfileRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_app_profile' not in self._stubs: - self._stubs['update_app_profile'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile', - request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_app_profile'] - - @property - def delete_app_profile(self) -> Callable[ - [bigtable_instance_admin.DeleteAppProfileRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete app profile method over gRPC. - - Deletes an app profile from an instance. - - Returns: - Callable[[~.DeleteAppProfileRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_app_profile' not in self._stubs: - self._stubs['delete_app_profile'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile', - request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_app_profile'] - - @property - def get_iam_policy(self) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - policy_pb2.Policy]: - r"""Return a callable for the get iam policy method over gRPC. - - Gets the access control policy for an instance - resource. Returns an empty policy if an instance exists - but does not have a policy set. - - Returns: - Callable[[~.GetIamPolicyRequest], - ~.Policy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_iam_policy' not in self._stubs: - self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy', - request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs['get_iam_policy'] - - @property - def set_iam_policy(self) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - policy_pb2.Policy]: - r"""Return a callable for the set iam policy method over gRPC. - - Sets the access control policy on an instance - resource. Replaces any existing policy. - - Returns: - Callable[[~.SetIamPolicyRequest], - ~.Policy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_iam_policy' not in self._stubs: - self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy', - request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs['set_iam_policy'] - - @property - def test_iam_permissions(self) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - iam_policy_pb2.TestIamPermissionsResponse]: - r"""Return a callable for the test iam permissions method over gRPC. - - Returns permissions that the caller has on the - specified instance resource. - - Returns: - Callable[[~.TestIamPermissionsRequest], - ~.TestIamPermissionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'test_iam_permissions' not in self._stubs: - self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions', - request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, - ) - return self._stubs['test_iam_permissions'] - - @property - def list_hot_tablets(self) -> Callable[ - [bigtable_instance_admin.ListHotTabletsRequest], - bigtable_instance_admin.ListHotTabletsResponse]: - r"""Return a callable for the list hot tablets method over gRPC. - - Lists hot tablets in a cluster, within the time range - provided. Hot tablets are ordered based on CPU usage. - - Returns: - Callable[[~.ListHotTabletsRequest], - ~.ListHotTabletsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_hot_tablets' not in self._stubs: - self._stubs['list_hot_tablets'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets', - request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, - response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, - ) - return self._stubs['list_hot_tablets'] - - def close(self): - self.grpc_channel.close() - - @property - def kind(self) -> str: - return "grpc" - - -__all__ = ( - 'BigtableInstanceAdminGrpcTransport', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py deleted file mode 100644 index e9bf6e6ab..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/grpc_asyncio.py +++ /dev/null @@ -1,848 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO -from .grpc import BigtableInstanceAdminGrpcTransport - - -class BigtableInstanceAdminGrpcAsyncIOTransport(BigtableInstanceAdminTransport): - """gRPC AsyncIO backend transport for BigtableInstanceAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable Instances and Clusters. Provides access to the Instance - and Cluster schemas only, not the tables' metadata or data - stored in those tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. 
The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. 
- credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_instance(self) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create instance method over gRPC. - - Create an instance within a project. - - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Returns: - Callable[[~.CreateInstanceRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_instance' not in self._stubs: - self._stubs['create_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateInstance', - request_serializer=bigtable_instance_admin.CreateInstanceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_instance'] - - @property - def get_instance(self) -> Callable[ - [bigtable_instance_admin.GetInstanceRequest], - Awaitable[instance.Instance]]: - r"""Return a callable for the get instance method over gRPC. - - Gets information about an instance. - - Returns: - Callable[[~.GetInstanceRequest], - Awaitable[~.Instance]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_instance' not in self._stubs: - self._stubs['get_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetInstance', - request_serializer=bigtable_instance_admin.GetInstanceRequest.serialize, - response_deserializer=instance.Instance.deserialize, - ) - return self._stubs['get_instance'] - - @property - def list_instances(self) -> Callable[ - [bigtable_instance_admin.ListInstancesRequest], - Awaitable[bigtable_instance_admin.ListInstancesResponse]]: - r"""Return a callable for the list instances method over gRPC. - - Lists information about instances in a project. - - Returns: - Callable[[~.ListInstancesRequest], - Awaitable[~.ListInstancesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_instances' not in self._stubs: - self._stubs['list_instances'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListInstances', - request_serializer=bigtable_instance_admin.ListInstancesRequest.serialize, - response_deserializer=bigtable_instance_admin.ListInstancesResponse.deserialize, - ) - return self._stubs['list_instances'] - - @property - def update_instance(self) -> Callable[ - [instance.Instance], - Awaitable[instance.Instance]]: - r"""Return a callable for the update instance method over gRPC. - - Updates an instance within a project. This method - updates only the display name and type for an Instance. - To update other Instance properties, such as labels, use - PartialUpdateInstance. - - Returns: - Callable[[~.Instance], - Awaitable[~.Instance]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_instance' not in self._stubs: - self._stubs['update_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateInstance', - request_serializer=instance.Instance.serialize, - response_deserializer=instance.Instance.deserialize, - ) - return self._stubs['update_instance'] - - @property - def partial_update_instance(self) -> Callable[ - [bigtable_instance_admin.PartialUpdateInstanceRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the partial update instance method over gRPC. - - Partially updates an instance within a project. This - method can modify all fields of an Instance and is the - preferred way to update an Instance. 
- - Returns: - Callable[[~.PartialUpdateInstanceRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'partial_update_instance' not in self._stubs: - self._stubs['partial_update_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateInstance', - request_serializer=bigtable_instance_admin.PartialUpdateInstanceRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['partial_update_instance'] - - @property - def delete_instance(self) -> Callable[ - [bigtable_instance_admin.DeleteInstanceRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete instance method over gRPC. - - Delete an instance from a project. - - Returns: - Callable[[~.DeleteInstanceRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_instance' not in self._stubs: - self._stubs['delete_instance'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteInstance', - request_serializer=bigtable_instance_admin.DeleteInstanceRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_instance'] - - @property - def create_cluster(self) -> Callable[ - [bigtable_instance_admin.CreateClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create cluster method over gRPC. - - Creates a cluster within an instance. 
- - Note that exactly one of Cluster.serve_nodes and - Cluster.cluster_config.cluster_autoscaling_config can be set. If - serve_nodes is set to non-zero, then the cluster is manually - scaled. If cluster_config.cluster_autoscaling_config is - non-empty, then autoscaling is enabled. - - Returns: - Callable[[~.CreateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_cluster' not in self._stubs: - self._stubs['create_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateCluster', - request_serializer=bigtable_instance_admin.CreateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_cluster'] - - @property - def get_cluster(self) -> Callable[ - [bigtable_instance_admin.GetClusterRequest], - Awaitable[instance.Cluster]]: - r"""Return a callable for the get cluster method over gRPC. - - Gets information about a cluster. - - Returns: - Callable[[~.GetClusterRequest], - Awaitable[~.Cluster]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_cluster' not in self._stubs: - self._stubs['get_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetCluster', - request_serializer=bigtable_instance_admin.GetClusterRequest.serialize, - response_deserializer=instance.Cluster.deserialize, - ) - return self._stubs['get_cluster'] - - @property - def list_clusters(self) -> Callable[ - [bigtable_instance_admin.ListClustersRequest], - Awaitable[bigtable_instance_admin.ListClustersResponse]]: - r"""Return a callable for the list clusters method over gRPC. - - Lists information about clusters in an instance. - - Returns: - Callable[[~.ListClustersRequest], - Awaitable[~.ListClustersResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_clusters' not in self._stubs: - self._stubs['list_clusters'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListClusters', - request_serializer=bigtable_instance_admin.ListClustersRequest.serialize, - response_deserializer=bigtable_instance_admin.ListClustersResponse.deserialize, - ) - return self._stubs['list_clusters'] - - @property - def update_cluster(self) -> Callable[ - [instance.Cluster], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update cluster method over gRPC. - - Updates a cluster within an instance. - - Note that UpdateCluster does not support updating - cluster_config.cluster_autoscaling_config. In order to update - it, you must use PartialUpdateCluster. - - Returns: - Callable[[~.Cluster], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_cluster' not in self._stubs: - self._stubs['update_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateCluster', - request_serializer=instance.Cluster.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_cluster'] - - @property - def partial_update_cluster(self) -> Callable[ - [bigtable_instance_admin.PartialUpdateClusterRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the partial update cluster method over gRPC. - - Partially updates a cluster within a project. This method is the - preferred way to update a Cluster. - - To enable and update autoscaling, set - cluster_config.cluster_autoscaling_config. When autoscaling is - enabled, serve_nodes is treated as an OUTPUT_ONLY field, meaning - that updates to it are ignored. Note that an update cannot - simultaneously set serve_nodes to non-zero and - cluster_config.cluster_autoscaling_config to non-empty, and also - specify both in the update_mask. - - To disable autoscaling, clear - cluster_config.cluster_autoscaling_config, and explicitly set a - serve_node count via the update_mask. - - Returns: - Callable[[~.PartialUpdateClusterRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'partial_update_cluster' not in self._stubs: - self._stubs['partial_update_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/PartialUpdateCluster', - request_serializer=bigtable_instance_admin.PartialUpdateClusterRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['partial_update_cluster'] - - @property - def delete_cluster(self) -> Callable[ - [bigtable_instance_admin.DeleteClusterRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete cluster method over gRPC. - - Deletes a cluster from an instance. - - Returns: - Callable[[~.DeleteClusterRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_cluster' not in self._stubs: - self._stubs['delete_cluster'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteCluster', - request_serializer=bigtable_instance_admin.DeleteClusterRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_cluster'] - - @property - def create_app_profile(self) -> Callable[ - [bigtable_instance_admin.CreateAppProfileRequest], - Awaitable[instance.AppProfile]]: - r"""Return a callable for the create app profile method over gRPC. - - Creates an app profile within an instance. - - Returns: - Callable[[~.CreateAppProfileRequest], - Awaitable[~.AppProfile]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_app_profile' not in self._stubs: - self._stubs['create_app_profile'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/CreateAppProfile', - request_serializer=bigtable_instance_admin.CreateAppProfileRequest.serialize, - response_deserializer=instance.AppProfile.deserialize, - ) - return self._stubs['create_app_profile'] - - @property - def get_app_profile(self) -> Callable[ - [bigtable_instance_admin.GetAppProfileRequest], - Awaitable[instance.AppProfile]]: - r"""Return a callable for the get app profile method over gRPC. - - Gets information about an app profile. - - Returns: - Callable[[~.GetAppProfileRequest], - Awaitable[~.AppProfile]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_app_profile' not in self._stubs: - self._stubs['get_app_profile'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetAppProfile', - request_serializer=bigtable_instance_admin.GetAppProfileRequest.serialize, - response_deserializer=instance.AppProfile.deserialize, - ) - return self._stubs['get_app_profile'] - - @property - def list_app_profiles(self) -> Callable[ - [bigtable_instance_admin.ListAppProfilesRequest], - Awaitable[bigtable_instance_admin.ListAppProfilesResponse]]: - r"""Return a callable for the list app profiles method over gRPC. - - Lists information about app profiles in an instance. - - Returns: - Callable[[~.ListAppProfilesRequest], - Awaitable[~.ListAppProfilesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_app_profiles' not in self._stubs: - self._stubs['list_app_profiles'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListAppProfiles', - request_serializer=bigtable_instance_admin.ListAppProfilesRequest.serialize, - response_deserializer=bigtable_instance_admin.ListAppProfilesResponse.deserialize, - ) - return self._stubs['list_app_profiles'] - - @property - def update_app_profile(self) -> Callable[ - [bigtable_instance_admin.UpdateAppProfileRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update app profile method over gRPC. - - Updates an app profile within an instance. - - Returns: - Callable[[~.UpdateAppProfileRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_app_profile' not in self._stubs: - self._stubs['update_app_profile'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/UpdateAppProfile', - request_serializer=bigtable_instance_admin.UpdateAppProfileRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_app_profile'] - - @property - def delete_app_profile(self) -> Callable[ - [bigtable_instance_admin.DeleteAppProfileRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete app profile method over gRPC. - - Deletes an app profile from an instance. - - Returns: - Callable[[~.DeleteAppProfileRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_app_profile' not in self._stubs: - self._stubs['delete_app_profile'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/DeleteAppProfile', - request_serializer=bigtable_instance_admin.DeleteAppProfileRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_app_profile'] - - @property - def get_iam_policy(self) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - Awaitable[policy_pb2.Policy]]: - r"""Return a callable for the get iam policy method over gRPC. - - Gets the access control policy for an instance - resource. Returns an empty policy if an instance exists - but does not have a policy set. - - Returns: - Callable[[~.GetIamPolicyRequest], - Awaitable[~.Policy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_iam_policy' not in self._stubs: - self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/GetIamPolicy', - request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs['get_iam_policy'] - - @property - def set_iam_policy(self) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - Awaitable[policy_pb2.Policy]]: - r"""Return a callable for the set iam policy method over gRPC. - - Sets the access control policy on an instance - resource. Replaces any existing policy. - - Returns: - Callable[[~.SetIamPolicyRequest], - Awaitable[~.Policy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_iam_policy' not in self._stubs: - self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/SetIamPolicy', - request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs['set_iam_policy'] - - @property - def test_iam_permissions(self) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - Awaitable[iam_policy_pb2.TestIamPermissionsResponse]]: - r"""Return a callable for the test iam permissions method over gRPC. - - Returns permissions that the caller has on the - specified instance resource. - - Returns: - Callable[[~.TestIamPermissionsRequest], - Awaitable[~.TestIamPermissionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'test_iam_permissions' not in self._stubs: - self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/TestIamPermissions', - request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, - ) - return self._stubs['test_iam_permissions'] - - @property - def list_hot_tablets(self) -> Callable[ - [bigtable_instance_admin.ListHotTabletsRequest], - Awaitable[bigtable_instance_admin.ListHotTabletsResponse]]: - r"""Return a callable for the list hot tablets method over gRPC. - - Lists hot tablets in a cluster, within the time range - provided. Hot tablets are ordered based on CPU usage. 
- - Returns: - Callable[[~.ListHotTabletsRequest], - Awaitable[~.ListHotTabletsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_hot_tablets' not in self._stubs: - self._stubs['list_hot_tablets'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableInstanceAdmin/ListHotTablets', - request_serializer=bigtable_instance_admin.ListHotTabletsRequest.serialize, - response_deserializer=bigtable_instance_admin.ListHotTabletsResponse.deserialize, - ) - return self._stubs['list_hot_tablets'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'BigtableInstanceAdminGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py deleted file mode 100644 index 4e2f47a55..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_instance_admin/transports/rest.py +++ /dev/null @@ -1,2776 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries -from google.api_core import rest_helpers -from google.api_core import rest_streaming -from google.api_core import path_template -from google.api_core import gapic_v1 - -from google.protobuf import json_format -from google.api_core import operations_v1 -from requests import __version__ as requests_version -import dataclasses -import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -import warnings - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - - -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore - -from .base import BigtableInstanceAdminTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, - grpc_version=None, - rest_version=requests_version, -) - - -class BigtableInstanceAdminRestInterceptor: - """Interceptor for BigtableInstanceAdmin. - - Interceptors are used to manipulate requests, request metadata, and responses - in arbitrary ways. 
- Example use cases include: - * Logging - * Verifying requests according to service or custom semantics - * Stripping extraneous information from responses - - These use cases and more can be enabled by injecting an - instance of a custom subclass when constructing the BigtableInstanceAdminRestTransport. - - .. code-block:: python - class MyCustomBigtableInstanceAdminInterceptor(BigtableInstanceAdminRestInterceptor): - def pre_create_app_profile(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_app_profile(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_cluster(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_instance(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_delete_app_profile(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_get_app_profile(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_app_profile(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_cluster(self, response): - logging.log(f"Received response: {response}") - return response - - 
def pre_get_iam_policy(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_iam_policy(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_instance(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_app_profiles(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_app_profiles(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_clusters(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_clusters(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_hot_tablets(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_hot_tablets(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_instances(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_instances(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_partial_update_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_partial_update_cluster(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_partial_update_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_partial_update_instance(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_set_iam_policy(self, request, metadata): - 
logging.log(f"Received request: {request}") - return request, metadata - - def post_set_iam_policy(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_test_iam_permissions(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_test_iam_permissions(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_app_profile(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_app_profile(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_cluster(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_cluster(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_instance(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_instance(self, response): - logging.log(f"Received response: {response}") - return response - - transport = BigtableInstanceAdminRestTransport(interceptor=MyCustomBigtableInstanceAdminInterceptor()) - client = BigtableInstanceAdminClient(transport=transport) - - - """ - def pre_create_app_profile(self, request: bigtable_instance_admin.CreateAppProfileRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.CreateAppProfileRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_app_profile - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. 
- """ - return request, metadata - - def post_create_app_profile(self, response: instance.AppProfile) -> instance.AppProfile: - """Post-rpc interceptor for create_app_profile - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_create_cluster(self, request: bigtable_instance_admin.CreateClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.CreateClusterRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_create_cluster(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for create_cluster - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_create_instance(self, request: bigtable_instance_admin.CreateInstanceRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.CreateInstanceRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_create_instance(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for create_instance - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. 
- """ - return response - def pre_delete_app_profile(self, request: bigtable_instance_admin.DeleteAppProfileRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.DeleteAppProfileRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_app_profile - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def pre_delete_cluster(self, request: bigtable_instance_admin.DeleteClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.DeleteClusterRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def pre_delete_instance(self, request: bigtable_instance_admin.DeleteInstanceRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.DeleteInstanceRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def pre_get_app_profile(self, request: bigtable_instance_admin.GetAppProfileRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.GetAppProfileRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_app_profile - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_get_app_profile(self, response: instance.AppProfile) -> instance.AppProfile: - """Post-rpc interceptor for get_app_profile - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. 
- """ - return response - def pre_get_cluster(self, request: bigtable_instance_admin.GetClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.GetClusterRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_get_cluster(self, response: instance.Cluster) -> instance.Cluster: - """Post-rpc interceptor for get_cluster - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_get_iam_policy(self, request: iam_policy_pb2.GetIamPolicyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_iam_policy - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: - """Post-rpc interceptor for get_iam_policy - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_get_instance(self, request: bigtable_instance_admin.GetInstanceRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.GetInstanceRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. 
- """ - return request, metadata - - def post_get_instance(self, response: instance.Instance) -> instance.Instance: - """Post-rpc interceptor for get_instance - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_list_app_profiles(self, request: bigtable_instance_admin.ListAppProfilesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.ListAppProfilesRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_app_profiles - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_list_app_profiles(self, response: bigtable_instance_admin.ListAppProfilesResponse) -> bigtable_instance_admin.ListAppProfilesResponse: - """Post-rpc interceptor for list_app_profiles - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_list_clusters(self, request: bigtable_instance_admin.ListClustersRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.ListClustersRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_clusters - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_list_clusters(self, response: bigtable_instance_admin.ListClustersResponse) -> bigtable_instance_admin.ListClustersResponse: - """Post-rpc interceptor for list_clusters - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. 
- """ - return response - def pre_list_hot_tablets(self, request: bigtable_instance_admin.ListHotTabletsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.ListHotTabletsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_hot_tablets - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_list_hot_tablets(self, response: bigtable_instance_admin.ListHotTabletsResponse) -> bigtable_instance_admin.ListHotTabletsResponse: - """Post-rpc interceptor for list_hot_tablets - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_list_instances(self, request: bigtable_instance_admin.ListInstancesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.ListInstancesRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_instances - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_list_instances(self, response: bigtable_instance_admin.ListInstancesResponse) -> bigtable_instance_admin.ListInstancesResponse: - """Post-rpc interceptor for list_instances - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_partial_update_cluster(self, request: bigtable_instance_admin.PartialUpdateClusterRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.PartialUpdateClusterRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for partial_update_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. 
- """ - return request, metadata - - def post_partial_update_cluster(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for partial_update_cluster - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_partial_update_instance(self, request: bigtable_instance_admin.PartialUpdateInstanceRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.PartialUpdateInstanceRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for partial_update_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_partial_update_instance(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for partial_update_instance - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_set_iam_policy(self, request: iam_policy_pb2.SetIamPolicyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for set_iam_policy - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: - """Post-rpc interceptor for set_iam_policy - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. 
- """ - return response - def pre_test_iam_permissions(self, request: iam_policy_pb2.TestIamPermissionsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for test_iam_permissions - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_test_iam_permissions(self, response: iam_policy_pb2.TestIamPermissionsResponse) -> iam_policy_pb2.TestIamPermissionsResponse: - """Post-rpc interceptor for test_iam_permissions - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_update_app_profile(self, request: bigtable_instance_admin.UpdateAppProfileRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_instance_admin.UpdateAppProfileRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_app_profile - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_update_app_profile(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for update_app_profile - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_update_cluster(self, request: instance.Cluster, metadata: Sequence[Tuple[str, str]]) -> Tuple[instance.Cluster, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_cluster - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. 
- """ - return request, metadata - - def post_update_cluster(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for update_cluster - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - def pre_update_instance(self, request: instance.Instance, metadata: Sequence[Tuple[str, str]]) -> Tuple[instance.Instance, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_instance - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableInstanceAdmin server. - """ - return request, metadata - - def post_update_instance(self, response: instance.Instance) -> instance.Instance: - """Post-rpc interceptor for update_instance - - Override in a subclass to manipulate the response - after it is returned by the BigtableInstanceAdmin server but before - it is returned to user code. - """ - return response - - -@dataclasses.dataclass -class BigtableInstanceAdminRestStub: - _session: AuthorizedSession - _host: str - _interceptor: BigtableInstanceAdminRestInterceptor - - -class BigtableInstanceAdminRestTransport(BigtableInstanceAdminTransport): - """REST backend transport for BigtableInstanceAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable Instances and Clusters. Provides access to the Instance - and Cluster schemas only, not the tables' metadata or data - stored in those tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends JSON representations of protocol buffers over HTTP/1.1 - - """ - - def __init__(self, *, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[ - ], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = 'https', - interceptor: Optional[BigtableInstanceAdminRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. 
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. - # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST) - self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or BigtableInstanceAdminRestInterceptor() - self._prep_wrapped_messages(client_info) - - @property - def operations_client(self) -> operations_v1.AbstractOperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Only create a new client if we do not already have one. 
- if self._operations_client is None: - http_options: Dict[str, List[Dict[str, str]]] = { - 'google.longrunning.Operations.CancelOperation': [ - { - 'method': 'post', - 'uri': '/v2/{name=operations/**}:cancel', - }, - ], - 'google.longrunning.Operations.DeleteOperation': [ - { - 'method': 'delete', - 'uri': '/v2/{name=operations/**}', - }, - ], - 'google.longrunning.Operations.GetOperation': [ - { - 'method': 'get', - 'uri': '/v2/{name=operations/**}', - }, - ], - 'google.longrunning.Operations.ListOperations': [ - { - 'method': 'get', - 'uri': '/v2/{name=operations/projects/**}/operations', - }, - ], - } - - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v2") - - self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) - - # Return the client from cache. - return self._operations_client - - class _CreateAppProfile(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("CreateAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "appProfileId" : "", } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.CreateAppProfileRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> instance.AppProfile: - r"""Call the create app profile method over HTTP. - - Args: - request (~.bigtable_instance_admin.CreateAppProfileRequest): - The request object. Request message for - BigtableInstanceAdmin.CreateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.instance.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{parent=projects/*/instances/*}/appProfiles', - 'body': 'app_profile', - }, - ] - request, metadata = self._interceptor.pre_create_app_profile(request, metadata) - pb_request = bigtable_instance_admin.CreateAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.AppProfile() - pb_resp = instance.AppProfile.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_app_profile(resp) - return resp - - class _CreateCluster(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("CreateCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "clusterId" : "", } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.CreateClusterRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the create cluster method over HTTP. - - Args: - request (~.bigtable_instance_admin.CreateClusterRequest): - The request object. Request message for - BigtableInstanceAdmin.CreateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{parent=projects/*/instances/*}/clusters', - 'body': 'cluster', - }, - ] - request, metadata = self._interceptor.pre_create_cluster(request, metadata) - pb_request = bigtable_instance_admin.CreateClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_cluster(resp) - return resp - - class _CreateInstance(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("CreateInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.CreateInstanceRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the create instance method over HTTP. - - Args: - request (~.bigtable_instance_admin.CreateInstanceRequest): - The request object. Request message for - BigtableInstanceAdmin.CreateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{parent=projects/*}/instances', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_create_instance(request, metadata) - pb_request = bigtable_instance_admin.CreateInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_instance(resp) - return resp - - class _DeleteAppProfile(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("DeleteAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "ignoreWarnings" : False, } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.DeleteAppProfileRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ): - r"""Call the delete app profile method over HTTP. - - Args: - request (~.bigtable_instance_admin.DeleteAppProfileRequest): - The request object. Request message for - BigtableInstanceAdmin.DeleteAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v2/{name=projects/*/instances/*/appProfiles/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_app_profile(request, metadata) - pb_request = bigtable_instance_admin.DeleteAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteCluster(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("DeleteCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.DeleteClusterRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ): - r"""Call the delete cluster method over HTTP. - - Args: - request (~.bigtable_instance_admin.DeleteClusterRequest): - The request object. 
Request message for - BigtableInstanceAdmin.DeleteCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v2/{name=projects/*/instances/*/clusters/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_cluster(request, metadata) - pb_request = bigtable_instance_admin.DeleteClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteInstance(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("DeleteInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.DeleteInstanceRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ): - r"""Call the delete instance method over HTTP. - - Args: - request (~.bigtable_instance_admin.DeleteInstanceRequest): - The request object. Request message for - BigtableInstanceAdmin.DeleteInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v2/{name=projects/*/instances/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_instance(request, metadata) - pb_request = bigtable_instance_admin.DeleteInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _GetAppProfile(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("GetAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.GetAppProfileRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> instance.AppProfile: - r"""Call the get app profile method over HTTP. - - Args: - request (~.bigtable_instance_admin.GetAppProfileRequest): - The request object. 
Request message for - BigtableInstanceAdmin.GetAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.instance.AppProfile: - A configuration object describing how - Cloud Bigtable should treat traffic from - a particular end user application. - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{name=projects/*/instances/*/appProfiles/*}', - }, - ] - request, metadata = self._interceptor.pre_get_app_profile(request, metadata) - pb_request = bigtable_instance_admin.GetAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.AppProfile() - pb_resp = instance.AppProfile.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_app_profile(resp) - return resp - - class _GetCluster(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("GetCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.GetClusterRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> instance.Cluster: - r"""Call the get cluster method over HTTP. - - Args: - request (~.bigtable_instance_admin.GetClusterRequest): - The request object. Request message for - BigtableInstanceAdmin.GetCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.instance.Cluster: - A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{name=projects/*/instances/*/clusters/*}', - }, - ] - request, metadata = self._interceptor.pre_get_cluster(request, metadata) - pb_request = bigtable_instance_admin.GetClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.Cluster() - pb_resp = instance.Cluster.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_cluster(resp) - return resp - - class _GetIamPolicy(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("GetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: iam_policy_pb2.GetIamPolicyRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. - - Args: - request (~.iam_policy_pb2.GetIamPolicyRequest): - The request object. Request message for ``GetIamPolicy`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which - specifies access controls for Google Cloud resources. - - A ``Policy`` is a collection of ``bindings``. A - ``binding`` binds one or more ``members``, or - principals, to a single ``role``. Principals can be user - accounts, service accounts, Google groups, and domains - (such as G Suite). A ``role`` is a named list of - permissions; each ``role`` can be an IAM predefined role - or a user-created custom role. 
- - For some types of Google Cloud resources, a ``binding`` - can also specify a ``condition``, which is a logical - expression that allows access to a resource only if the - expression evaluates to ``true``. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the `IAM - documentation `__. - - **JSON example:** - - :: - - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } - - **YAML example:** - - :: - - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 - - For a description of IAM and its features, see the `IAM - documentation `__. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{resource=projects/*/instances/*}:getIamPolicy', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = policy_pb2.Policy() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_iam_policy(resp) - return resp - - class _GetInstance(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("GetInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.GetInstanceRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> instance.Instance: - r"""Call the get instance method over HTTP. - - Args: - request (~.bigtable_instance_admin.GetInstanceRequest): - The request object. Request message for - BigtableInstanceAdmin.GetInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.instance.Instance: - A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{name=projects/*/instances/*}', - }, - ] - request, metadata = self._interceptor.pre_get_instance(request, metadata) - pb_request = bigtable_instance_admin.GetInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.Instance() - pb_resp = instance.Instance.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_instance(resp) - return resp - - class _ListAppProfiles(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("ListAppProfiles") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.ListAppProfilesRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable_instance_admin.ListAppProfilesResponse: - r"""Call the list app profiles method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListAppProfilesRequest): - The request object. Request message for - BigtableInstanceAdmin.ListAppProfiles. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable_instance_admin.ListAppProfilesResponse: - Response message for - BigtableInstanceAdmin.ListAppProfiles. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{parent=projects/*/instances/*}/appProfiles', - }, - ] - request, metadata = self._interceptor.pre_list_app_profiles(request, metadata) - pb_request = bigtable_instance_admin.ListAppProfilesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListAppProfilesResponse() - pb_resp = bigtable_instance_admin.ListAppProfilesResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_app_profiles(resp) - return resp - - class _ListClusters(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("ListClusters") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.ListClustersRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable_instance_admin.ListClustersResponse: - r"""Call the list clusters method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListClustersRequest): - The request object. Request message for - BigtableInstanceAdmin.ListClusters. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable_instance_admin.ListClustersResponse: - Response message for - BigtableInstanceAdmin.ListClusters. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{parent=projects/*/instances/*}/clusters', - }, - ] - request, metadata = self._interceptor.pre_list_clusters(request, metadata) - pb_request = bigtable_instance_admin.ListClustersRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListClustersResponse() - pb_resp = bigtable_instance_admin.ListClustersResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_clusters(resp) - return resp - - class _ListHotTablets(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("ListHotTablets") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.ListHotTabletsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable_instance_admin.ListHotTabletsResponse: - r"""Call the list hot tablets method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListHotTabletsRequest): - The request object. Request message for - BigtableInstanceAdmin.ListHotTablets. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable_instance_admin.ListHotTabletsResponse: - Response message for - BigtableInstanceAdmin.ListHotTablets. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets', - }, - ] - request, metadata = self._interceptor.pre_list_hot_tablets(request, metadata) - pb_request = bigtable_instance_admin.ListHotTabletsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListHotTabletsResponse() - pb_resp = bigtable_instance_admin.ListHotTabletsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_hot_tablets(resp) - return resp - - class _ListInstances(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("ListInstances") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.ListInstancesRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable_instance_admin.ListInstancesResponse: - r"""Call the list instances method over HTTP. - - Args: - request (~.bigtable_instance_admin.ListInstancesRequest): - The request object. Request message for - BigtableInstanceAdmin.ListInstances. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable_instance_admin.ListInstancesResponse: - Response message for - BigtableInstanceAdmin.ListInstances. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{parent=projects/*}/instances', - }, - ] - request, metadata = self._interceptor.pre_list_instances(request, metadata) - pb_request = bigtable_instance_admin.ListInstancesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_instance_admin.ListInstancesResponse() - pb_resp = bigtable_instance_admin.ListInstancesResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_instances(resp) - return resp - - class _PartialUpdateCluster(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("PartialUpdateCluster") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask" : {}, } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.PartialUpdateClusterRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the partial update cluster method over HTTP. - - Args: - request (~.bigtable_instance_admin.PartialUpdateClusterRequest): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateCluster. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v2/{cluster.name=projects/*/instances/*/clusters/*}', - 'body': 'cluster', - }, - ] - request, metadata = self._interceptor.pre_partial_update_cluster(request, metadata) - pb_request = bigtable_instance_admin.PartialUpdateClusterRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_partial_update_cluster(resp) - return resp - - class _PartialUpdateInstance(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("PartialUpdateInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask" : {}, } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.PartialUpdateInstanceRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the partial update instance method over HTTP. - - Args: - request (~.bigtable_instance_admin.PartialUpdateInstanceRequest): - The request object. Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v2/{instance.name=projects/*/instances/*}', - 'body': 'instance', - }, - ] - request, metadata = self._interceptor.pre_partial_update_instance(request, metadata) - pb_request = bigtable_instance_admin.PartialUpdateInstanceRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_partial_update_instance(resp) - return resp - - class _SetIamPolicy(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("SetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: iam_policy_pb2.SetIamPolicyRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. - - Args: - request (~.iam_policy_pb2.SetIamPolicyRequest): - The request object. Request message for ``SetIamPolicy`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which - specifies access controls for Google Cloud resources. - - A ``Policy`` is a collection of ``bindings``. A - ``binding`` binds one or more ``members``, or - principals, to a single ``role``. Principals can be user - accounts, service accounts, Google groups, and domains - (such as G Suite). A ``role`` is a named list of - permissions; each ``role`` can be an IAM predefined role - or a user-created custom role. - - For some types of Google Cloud resources, a ``binding`` - can also specify a ``condition``, which is a logical - expression that allows access to a resource only if the - expression evaluates to ``true``. 
A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the `IAM - documentation `__. - - **JSON example:** - - :: - - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } - - **YAML example:** - - :: - - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 - - For a description of IAM and its features, see the `IAM - documentation `__. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{resource=projects/*/instances/*}:setIamPolicy', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = policy_pb2.Policy() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_set_iam_policy(resp) - return resp - - class _TestIamPermissions(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("TestIamPermissions") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: iam_policy_pb2.TestIamPermissionsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. - - Args: - request (~.iam_policy_pb2.TestIamPermissionsRequest): - The request object. Request message for ``TestIamPermissions`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.iam_policy_pb2.TestIamPermissionsResponse: - Response message for ``TestIamPermissions`` method. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{resource=projects/*/instances/*}:testIamPermissions', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_test_iam_permissions(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = iam_policy_pb2.TestIamPermissionsResponse() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_test_iam_permissions(resp) - return resp - - class _UpdateAppProfile(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("UpdateAppProfile") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask" : {}, } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_instance_admin.UpdateAppProfileRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the update app profile method over HTTP. - - Args: - request (~.bigtable_instance_admin.UpdateAppProfileRequest): - The request object. Request message for - BigtableInstanceAdmin.UpdateAppProfile. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}', - 'body': 'app_profile', - }, - ] - request, metadata = self._interceptor.pre_update_app_profile(request, metadata) - pb_request = bigtable_instance_admin.UpdateAppProfileRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_app_profile(resp) - return resp - - class _UpdateCluster(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("UpdateCluster") - - def __call__(self, - request: instance.Cluster, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the update cluster method over HTTP. - - Args: - request (~.instance.Cluster): - The request object. A resizable group of nodes in a particular cloud - location, capable of serving all - [Tables][google.bigtable.admin.v2.Table] in the parent - [Instance][google.bigtable.admin.v2.Instance]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'put', - 'uri': '/v2/{name=projects/*/instances/*/clusters/*}', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_update_cluster(request, metadata) - pb_request = instance.Cluster.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_cluster(resp) - return resp - - class _UpdateInstance(BigtableInstanceAdminRestStub): - def __hash__(self): - return hash("UpdateInstance") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: instance.Instance, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> instance.Instance: - r"""Call the update instance method over HTTP. - - Args: - request (~.instance.Instance): - The request object. A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.instance.Instance: - A collection of Bigtable - [Tables][google.bigtable.admin.v2.Table] and the - resources that serve them. All tables in an instance are - served from all - [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'put', - 'uri': '/v2/{name=projects/*/instances/*}', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_update_instance(request, metadata) - pb_request = instance.Instance.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = instance.Instance() - pb_resp = instance.Instance.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_instance(resp) - return resp - - @property - def create_app_profile(self) -> Callable[ - [bigtable_instance_admin.CreateAppProfileRequest], - instance.AppProfile]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._CreateAppProfile(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_cluster(self) -> Callable[ - [bigtable_instance_admin.CreateClusterRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_instance(self) -> Callable[ - [bigtable_instance_admin.CreateInstanceRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_app_profile(self) -> Callable[ - [bigtable_instance_admin.DeleteAppProfileRequest], - empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteAppProfile(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_cluster(self) -> Callable[ - [bigtable_instance_admin.DeleteClusterRequest], - empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_instance(self) -> Callable[ - [bigtable_instance_admin.DeleteInstanceRequest], - empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._DeleteInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_app_profile(self) -> Callable[ - [bigtable_instance_admin.GetAppProfileRequest], - instance.AppProfile]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetAppProfile(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_cluster(self) -> Callable[ - [bigtable_instance_admin.GetClusterRequest], - instance.Cluster]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_iam_policy(self) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - policy_pb2.Policy]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_instance(self) -> Callable[ - [bigtable_instance_admin.GetInstanceRequest], - instance.Instance]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_app_profiles(self) -> Callable[ - [bigtable_instance_admin.ListAppProfilesRequest], - bigtable_instance_admin.ListAppProfilesResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._ListAppProfiles(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_clusters(self) -> Callable[ - [bigtable_instance_admin.ListClustersRequest], - bigtable_instance_admin.ListClustersResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListClusters(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_hot_tablets(self) -> Callable[ - [bigtable_instance_admin.ListHotTabletsRequest], - bigtable_instance_admin.ListHotTabletsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListHotTablets(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_instances(self) -> Callable[ - [bigtable_instance_admin.ListInstancesRequest], - bigtable_instance_admin.ListInstancesResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListInstances(self._session, self._host, self._interceptor) # type: ignore - - @property - def partial_update_cluster(self) -> Callable[ - [bigtable_instance_admin.PartialUpdateClusterRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._PartialUpdateCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def partial_update_instance(self) -> Callable[ - [bigtable_instance_admin.PartialUpdateInstanceRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._PartialUpdateInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def set_iam_policy(self) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - policy_pb2.Policy]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore - - @property - def test_iam_permissions(self) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - iam_policy_pb2.TestIamPermissionsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_app_profile(self) -> Callable[ - [bigtable_instance_admin.UpdateAppProfileRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateAppProfile(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_cluster(self) -> Callable[ - [instance.Cluster], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateCluster(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_instance(self) -> Callable[ - [instance.Instance], - instance.Instance]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._UpdateInstance(self._session, self._host, self._interceptor) # type: ignore - - @property - def kind(self) -> str: - return "rest" - - def close(self): - self._session.close() - - -__all__=( - 'BigtableInstanceAdminRestTransport', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py deleted file mode 100644 index 4923ee911..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/__init__.py +++ /dev/null @@ -1,22 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .client import BigtableTableAdminClient -from .async_client import BigtableTableAdminAsyncClient - -__all__ = ( - 'BigtableTableAdminClient', - 'BigtableTableAdminAsyncClient', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py deleted file mode 100644 index fcf79ece6..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/async_client.py +++ /dev/null @@ -1,2758 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import functools -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -from google.api_core.client_options import ClientOptions -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry_async as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.AsyncRetry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.AsyncRetry, object, None] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO -from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport -from .client import BigtableTableAdminClient - - -class BigtableTableAdminAsyncClient: - """Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. - """ - - _client: BigtableTableAdminClient - - # Copy defaults from the synchronous client for use here. - # Note: DEFAULT_ENDPOINT is deprecated. 
Use _DEFAULT_ENDPOINT_TEMPLATE instead. - DEFAULT_ENDPOINT = BigtableTableAdminClient.DEFAULT_ENDPOINT - DEFAULT_MTLS_ENDPOINT = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - _DEFAULT_ENDPOINT_TEMPLATE = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE - _DEFAULT_UNIVERSE = BigtableTableAdminClient._DEFAULT_UNIVERSE - - backup_path = staticmethod(BigtableTableAdminClient.backup_path) - parse_backup_path = staticmethod(BigtableTableAdminClient.parse_backup_path) - cluster_path = staticmethod(BigtableTableAdminClient.cluster_path) - parse_cluster_path = staticmethod(BigtableTableAdminClient.parse_cluster_path) - crypto_key_version_path = staticmethod(BigtableTableAdminClient.crypto_key_version_path) - parse_crypto_key_version_path = staticmethod(BigtableTableAdminClient.parse_crypto_key_version_path) - instance_path = staticmethod(BigtableTableAdminClient.instance_path) - parse_instance_path = staticmethod(BigtableTableAdminClient.parse_instance_path) - snapshot_path = staticmethod(BigtableTableAdminClient.snapshot_path) - parse_snapshot_path = staticmethod(BigtableTableAdminClient.parse_snapshot_path) - table_path = staticmethod(BigtableTableAdminClient.table_path) - parse_table_path = staticmethod(BigtableTableAdminClient.parse_table_path) - common_billing_account_path = staticmethod(BigtableTableAdminClient.common_billing_account_path) - parse_common_billing_account_path = staticmethod(BigtableTableAdminClient.parse_common_billing_account_path) - common_folder_path = staticmethod(BigtableTableAdminClient.common_folder_path) - parse_common_folder_path = staticmethod(BigtableTableAdminClient.parse_common_folder_path) - common_organization_path = staticmethod(BigtableTableAdminClient.common_organization_path) - parse_common_organization_path = staticmethod(BigtableTableAdminClient.parse_common_organization_path) - common_project_path = staticmethod(BigtableTableAdminClient.common_project_path) - parse_common_project_path = 
staticmethod(BigtableTableAdminClient.parse_common_project_path) - common_location_path = staticmethod(BigtableTableAdminClient.common_location_path) - parse_common_location_path = staticmethod(BigtableTableAdminClient.parse_common_location_path) - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableTableAdminAsyncClient: The constructed client. - """ - return BigtableTableAdminClient.from_service_account_info.__func__(BigtableTableAdminAsyncClient, info, *args, **kwargs) # type: ignore - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableTableAdminAsyncClient: The constructed client. - """ - return BigtableTableAdminClient.from_service_account_file.__func__(BigtableTableAdminAsyncClient, filename, *args, **kwargs) # type: ignore - - from_service_account_json = from_service_account_file - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[ClientOptions] = None): - """Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. 
- - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - return BigtableTableAdminClient.get_mtls_endpoint_and_cert_source(client_options) # type: ignore - - @property - def transport(self) -> BigtableTableAdminTransport: - """Returns the transport used by the client instance. - - Returns: - BigtableTableAdminTransport: The transport used by the client instance. - """ - return self._client.transport - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._client._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. - - Returns: - str: The universe domain used - by the client instance. 
- """ - return self._client._universe_domain - - get_transport_class = functools.partial(type(BigtableTableAdminClient).get_transport_class, type(BigtableTableAdminClient)) - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Union[str, BigtableTableAdminTransport] = "grpc_asyncio", - client_options: Optional[ClientOptions] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the bigtable table admin async client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, ~.BigtableTableAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. If - not provided, the default SSL client certificate will be used if - present. 
If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client = BigtableTableAdminClient( - credentials=credentials, - transport=transport, - client_options=client_options, - client_info=client_info, - - ) - - async def create_table(self, - request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, - *, - parent: Optional[str] = None, - table_id: Optional[str] = None, - table: Optional[gba_table.Table] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gba_table.Table: - r"""Creates a new table in the specified instance. - The table can be created with a full set of initial - column families, specified in the request. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - parent (:class:`str`): - Required. The unique name of the instance in which to - create the table. Values are of the form - ``projects/{project}/instances/{instance}``. 
- - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table_id (:class:`str`): - Required. The name by which the new table should be - referred to within the parent instance, e.g., ``foobar`` - rather than ``{parent}/tables/foobar``. Maximum 50 - characters. - - This corresponds to the ``table_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table (:class:`google.cloud.bigtable_admin_v2.types.Table`): - Required. The Table to create. - This corresponds to the ``table`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, table]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.CreateTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if table_id is not None: - request.table_id = table_id - if table is not None: - request.table = table - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_table, - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def create_table_from_snapshot(self, - request: Optional[Union[bigtable_table_admin.CreateTableFromSnapshotRequest, dict]] = None, - *, - parent: Optional[str] = None, - table_id: Optional[str] = None, - source_snapshot: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new table from the specified snapshot. The - target table must not exist. The snapshot and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. 
This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - parent (:class:`str`): - Required. The unique name of the instance in which to - create the table. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table_id (:class:`str`): - Required. The name by which the new table should be - referred to within the parent instance, e.g., ``foobar`` - rather than ``{parent}/tables/foobar``. - - This corresponds to the ``table_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_snapshot (:class:`str`): - Required. The unique name of the snapshot from which to - restore the table. The snapshot and the table must be in - the same instance. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``source_snapshot`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent, table_id, source_snapshot]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if table_id is not None: - request.table_id = table_id - if source_snapshot is not None: - request.source_snapshot = source_snapshot - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_table_from_snapshot, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, - ) - - # Done; return the response. - return response - - async def list_tables(self, - request: Optional[Union[bigtable_table_admin.ListTablesRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTablesAsyncPager: - r"""Lists all tables served from a specified instance. 
- - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - parent (:class:`str`): - Required. The unique name of the instance for which - tables should be listed. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesAsyncPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.ListTablesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_tables, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListTablesAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def get_table(self, - request: Optional[Union[bigtable_table_admin.GetTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Table: - r"""Gets metadata information about the specified table. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - name (:class:`str`): - Required. The unique name of the requested table. Values - are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.GetTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_table, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def update_table(self, - request: Optional[Union[bigtable_table_admin.UpdateTableRequest, dict]] = None, - *, - table: Optional[gba_table.Table] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Updates a specified table. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]]): - The request object. The request for - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - table (:class:`google.cloud.bigtable_admin_v2.types.Table`): - Required. The table to update. The table's ``name`` - field is used to identify the table to update. - - This corresponds to the ``table`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. The list of fields to update. A mask - specifying which fields (e.g. ``change_stream_config``) - in the ``table`` field should be updated. This mask is - relative to the ``table`` field, not to the request - message. The wildcard (*) path is currently not - supported. Currently UpdateTable is only supported for - the following fields: - - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` - - If ``column_families`` is set in ``update_mask``, it - will return an UNIMPLEMENTED error. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.UpdateTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if table is not None: - request.table = table - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_table, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table.name", request.table.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - gba_table.Table, - metadata_type=bigtable_table_admin.UpdateTableMetadata, - ) - - # Done; return the response. 
- return response - - async def delete_table(self, - request: Optional[Union[bigtable_table_admin.DeleteTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently deletes a specified table and all of its - data. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - name (:class:`str`): - Required. The unique name of the table to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.DeleteTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_table, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def undelete_table(self, - request: Optional[Union[bigtable_table_admin.UndeleteTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Restores a specified table which was accidentally - deleted. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - name (:class:`str`): - Required. The unique name of the table to be restored. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.UndeleteTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.undelete_table, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.UndeleteTableMetadata, - ) - - # Done; return the response. 
- return response - - async def modify_column_families(self, - request: Optional[Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict]] = None, - *, - name: Optional[str] = None, - modifications: Optional[MutableSequence[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Table: - r"""Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - name (:class:`str`): - Required. The unique name of the table whose families - should be modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - modifications (:class:`MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]`): - Required. Modifications to be - atomically applied to the specified - table's families. Entries are applied in - order, meaning that earlier - modifications can be masked by later - ones (in the case of repeated updates to - the same family, for example). - - This corresponds to the ``modifications`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, modifications]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if modifications: - request.modifications.extend(modifications) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.modify_column_families, - default_timeout=300.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def drop_row_range(self, - request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently drop/delete a row range from a specified - table. The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - request = bigtable_table_admin.DropRowRangeRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.drop_row_range, - default_timeout=3600.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. 
- await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def generate_consistency_token(self, - request: Optional[Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - r"""Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - name (:class:`str`): - Required. The unique name of the Table for which to - create a consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - """ - # Create or coerce a protobuf request object. 
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.generate_consistency_token, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def check_consistency(self, - request: Optional[Union[bigtable_table_admin.CheckConsistencyRequest, dict]] = None, - *, - name: Optional[str] = None, - consistency_token: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.CheckConsistencyResponse: - r"""Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - name (:class:`str`): - Required. The unique name of the Table for which to - check replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - consistency_token (:class:`str`): - Required. The token created using - GenerateConsistencyToken for the Table. - - This corresponds to the ``consistency_token`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, consistency_token]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.CheckConsistencyRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if consistency_token is not None: - request.consistency_token = consistency_token - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.check_consistency, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def snapshot_table(self, - request: Optional[Union[bigtable_table_admin.SnapshotTableRequest, dict]] = None, - *, - name: Optional[str] = None, - cluster: Optional[str] = None, - snapshot_id: Optional[str] = None, - description: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Creates a new snapshot in the specified cluster from - the specified source table. The cluster and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (:class:`str`): - Required. The unique name of the table to have the - snapshot taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (:class:`str`): - Required. The name of the cluster where the snapshot - will be created in. 
Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - snapshot_id (:class:`str`): - Required. The ID by which the new snapshot should be - referred to within the parent cluster, e.g., - ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - - This corresponds to the ``snapshot_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - description (:class:`str`): - Description of the snapshot. - This corresponds to the ``description`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a - checkpoint for data restoration or a data source for - a new table. - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, cluster, snapshot_id, description]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.SnapshotTableRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if cluster is not None: - request.cluster = cluster - if snapshot_id is not None: - request.snapshot_id = snapshot_id - if description is not None: - request.description = description - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.snapshot_table, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Snapshot, - metadata_type=bigtable_table_admin.SnapshotTableMetadata, - ) - - # Done; return the response. - return response - - async def get_snapshot(self, - request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Snapshot: - r"""Gets metadata information about the specified - snapshot. 
- Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (:class:`str`): - Required. The unique name of the requested snapshot. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Snapshot: - A snapshot of a table at a particular - time. A snapshot can be used as a - checkpoint for data restoration or a - data source for a new table. - - Note: This is a private alpha release of - Cloud Bigtable snapshots. This feature - is not currently available to most Cloud - Bigtable customers. This feature might - be changed in backward-incompatible ways - and is not recommended for production - use. It is not subject to any SLA or - deprecation policy. 
- - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.GetSnapshotRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_snapshot, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def list_snapshots(self, - request: Optional[Union[bigtable_table_admin.ListSnapshotsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSnapshotsAsyncPager: - r"""Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. 
This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - parent (:class:`str`): - Required. The unique name of the cluster for which - snapshots should be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list snapshots for all - clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsAsyncPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. 
This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.ListSnapshotsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_snapshots, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. - response = pagers.ListSnapshotsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - async def delete_snapshot(self, - request: Optional[Union[bigtable_table_admin.DeleteSnapshotRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (:class:`str`): - Required. The unique name of the snapshot to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. 
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.DeleteSnapshotRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_snapshot, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def create_backup(self, - request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, - *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - backup: Optional[table.Backup] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. 
- The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]]): - The request object. The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - parent (:class:`str`): - Required. This must be one of the clusters in the - instance in which this table is located. The backup will - be stored in this cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (:class:`str`): - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are - combined as {parent}/backups/{backup_id} to create the - full backup name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): - Required. The backup to create. - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. 
- - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, backup_id, backup]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.CreateBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if backup is not None: - request.backup = backup - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.create_backup, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CreateBackupMetadata, - ) - - # Done; return the response. 
- return response - - async def get_backup(self, - request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Gets metadata on a pending or completed Cloud - Bigtable Backup. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]]): - The request object. The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - name (:class:`str`): - Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.GetBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_backup, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def update_backup(self, - request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, - *, - backup: Optional[table.Backup] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Updates a pending or completed Cloud Bigtable Backup. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]]): - The request object. The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - backup (:class:`google.cloud.bigtable_admin_v2.types.Backup`): - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only - supported for the following fields: - - - ``backup.expire_time``. - - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (:class:`google.protobuf.field_mask_pb2.FieldMask`): - Required. 
A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be - updated. This mask is relative to the Backup resource, - not to the request message. The field mask must always - be specified; this prevents any future fields from being - erased accidentally by clients that do not know about - them. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([backup, update_mask]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.UpdateBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if backup is not None: - request.backup = backup - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.update_backup, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("backup.name", request.backup.name), - )), - ) - - # Validate the universe domain. 
- self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def delete_backup(self, - request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a pending or completed Cloud Bigtable backup. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]]): - The request object. The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - name (:class:`str`): - Required. Name of the backup to delete. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.DeleteBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.delete_backup, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - async def list_backups(self, - request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBackupsAsyncPager: - r"""Lists Cloud Bigtable backups. Returns both completed - and pending backups. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]]): - The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - parent (:class:`str`): - Required. The cluster to list backups from. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list backups for all clusters - in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsAsyncPager: - The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.ListBackupsRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.list_backups, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__aiter__` convenience method. 
- response = pagers.ListBackupsAsyncPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def restore_table(self, - request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Create a new table by restoring from a completed backup. The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. - The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]]): - The request object. The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. 
- request = bigtable_table_admin.RestoreTableRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.restore_table, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.RestoreTableMetadata, - ) - - # Done; return the response. - return response - - async def copy_backup(self, - request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, - *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - source_backup: Optional[str] = None, - expire_time: Optional[timestamp_pb2.Timestamp] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation_async.AsyncOperation: - r"""Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. - - Args: - request (Optional[Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]]): - The request object. The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - parent (:class:`str`): - Required. The name of the destination cluster that will - contain the backup copy. The cluster must already - exists. 
Values are of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (:class:`str`): - Required. The id of the new backup. The ``backup_id`` - along with ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup - name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_backup (:class:`str`): - Required. The source backup to be copied from. The - source backup needs to be in READY state for it to be - copied. Copying a copied backup is not allowed. Once - CopyBackup is in progress, the source backup cannot be - deleted or cleaned up on expiration until CopyBackup is - finished. Values are of the form: - ``projects//instances//clusters//backups/``. - - This corresponds to the ``source_backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - expire_time (:class:`google.protobuf.timestamp_pb2.Timestamp`): - Required. Required. The expiration time of the copied - backup with microsecond granularity that must be at - least 6 hours and at most 30 days from the time the - request is received. Once the ``expire_time`` has - passed, Cloud Bigtable will delete the backup and free - the resources used by the backup. - - This corresponds to the ``expire_time`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation_async.AsyncOperation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, backup_id, source_backup, expire_time]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - request = bigtable_table_admin.CopyBackupRequest(request) - - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if source_backup is not None: - request.source_backup = source_backup - if expire_time is not None: - request.expire_time = expire_time - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.copy_backup, - default_timeout=None, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation_async.from_gapic( - response, - self._client._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CopyBackupMetadata, - ) - - # Done; return the response. - return response - - async def get_iam_policy(self, - request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Gets the access control policy for a Table or Backup - resource. Returns an empty policy if the resource exists - but does not have a policy set. - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]]): - The request object. Request message for ``GetIamPolicy`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. 
- - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([resource]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.GetIamPolicyRequest(**request) - elif not request: - request = iam_policy_pb2.GetIamPolicyRequest(resource=resource, ) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.get_iam_policy, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("resource", request.resource), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def set_iam_policy(self, - request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Sets the access control policy on a Table or Backup - resource. Replaces any existing policy. - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]]): - The request object. 
Request message for ``SetIamPolicy`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
- - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.SetIamPolicyRequest(**request) - elif not request: - request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, ) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = gapic_v1.method_async.wrap_method( - self._client._transport.set_iam_policy, - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("resource", request.resource), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. - response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def test_iam_permissions(self, - request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, - *, - resource: Optional[str] = None, - permissions: Optional[MutableSequence[str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Returns permissions that the caller has on the - specified Table or Backup resource. - - Args: - request (Optional[Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]]): - The request object. Request message for ``TestIamPermissions`` method. - resource (:class:`str`): - REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - permissions (:class:`MutableSequence[str]`): - The set of permissions to check for the ``resource``. - Permissions with wildcards (such as '*' or 'storage.*') - are not allowed. For more information see `IAM - Overview `__. - - This corresponds to the ``permissions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. 
- retry (google.api_core.retry_async.AsyncRetry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: - Response message for TestIamPermissions method. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource, permissions]) - if request is not None and has_flattened_params: - raise ValueError("If the `request` argument is set, then none of " - "the individual field arguments should be set.") - - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - if isinstance(request, dict): - request = iam_policy_pb2.TestIamPermissionsRequest(**request) - elif not request: - request = iam_policy_pb2.TestIamPermissionsRequest(resource=resource, permissions=permissions, ) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = gapic_v1.method_async.wrap_method( - self._client._transport.test_iam_permissions, - default_retry=retries.AsyncRetry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=DEFAULT_CLIENT_INFO, - ) - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("resource", request.resource), - )), - ) - - # Validate the universe domain. - self._client._validate_universe_domain() - - # Send the request. 
- response = await rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - async def __aenter__(self) -> "BigtableTableAdminAsyncClient": - return self - - async def __aexit__(self, exc_type, exc, tb): - await self.transport.close() - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "BigtableTableAdminAsyncClient", -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py deleted file mode 100644 index 8c5ba4208..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/client.py +++ /dev/null @@ -1,3083 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from collections import OrderedDict -import os -import re -from typing import Dict, Mapping, MutableMapping, MutableSequence, Optional, Sequence, Tuple, Type, Union, cast -import warnings - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -from google.api_core import client_options as client_options_lib -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport import mtls # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth.exceptions import MutualTLSChannelError # type: ignore -from google.oauth2 import service_account # type: ignore - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - -from google.api_core import operation # type: ignore -from google.api_core import operation_async # type: ignore -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from .transports.base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO -from .transports.grpc import BigtableTableAdminGrpcTransport -from .transports.grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport -from .transports.rest import BigtableTableAdminRestTransport - - -class BigtableTableAdminClientMeta(type): - """Metaclass for the BigtableTableAdmin 
client. - - This provides class-level methods for building and retrieving - support objects (e.g. transport) without polluting the client instance - objects. - """ - _transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTableAdminTransport]] - _transport_registry["grpc"] = BigtableTableAdminGrpcTransport - _transport_registry["grpc_asyncio"] = BigtableTableAdminGrpcAsyncIOTransport - _transport_registry["rest"] = BigtableTableAdminRestTransport - - def get_transport_class(cls, - label: Optional[str] = None, - ) -> Type[BigtableTableAdminTransport]: - """Returns an appropriate transport class. - - Args: - label: The name of the desired transport. If none is - provided, then the first transport in the registry is used. - - Returns: - The transport class to use. - """ - # If a specific transport is requested, return that one. - if label: - return cls._transport_registry[label] - - # No transport is requested; return the default (that is, the first one - # in the dictionary). - return next(iter(cls._transport_registry.values())) - - -class BigtableTableAdminClient(metaclass=BigtableTableAdminClientMeta): - """Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. - """ - - @staticmethod - def _get_default_mtls_endpoint(api_endpoint): - """Converts api endpoint to mTLS endpoint. - - Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to - "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively. - Args: - api_endpoint (Optional[str]): the api endpoint to convert. - Returns: - str: converted mTLS api endpoint. - """ - if not api_endpoint: - return api_endpoint - - mtls_endpoint_re = re.compile( - r"(?P[^.]+)(?P\.mtls)?(?P\.sandbox)?(?P\.googleapis\.com)?" 
- ) - - m = mtls_endpoint_re.match(api_endpoint) - name, mtls, sandbox, googledomain = m.groups() - if mtls or not googledomain: - return api_endpoint - - if sandbox: - return api_endpoint.replace( - "sandbox.googleapis.com", "mtls.sandbox.googleapis.com" - ) - - return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com") - - # Note: DEFAULT_ENDPOINT is deprecated. Use _DEFAULT_ENDPOINT_TEMPLATE instead. - DEFAULT_ENDPOINT = "bigtableadmin.googleapis.com" - DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore - DEFAULT_ENDPOINT - ) - - _DEFAULT_ENDPOINT_TEMPLATE = "bigtableadmin.{UNIVERSE_DOMAIN}" - _DEFAULT_UNIVERSE = "googleapis.com" - - @classmethod - def from_service_account_info(cls, info: dict, *args, **kwargs): - """Creates an instance of this client using the provided credentials - info. - - Args: - info (dict): The service account private key info. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableTableAdminClient: The constructed client. - """ - credentials = service_account.Credentials.from_service_account_info(info) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - @classmethod - def from_service_account_file(cls, filename: str, *args, **kwargs): - """Creates an instance of this client using the provided credentials - file. - - Args: - filename (str): The path to the service account private key json - file. - args: Additional arguments to pass to the constructor. - kwargs: Additional arguments to pass to the constructor. - - Returns: - BigtableTableAdminClient: The constructed client. 
- """ - credentials = service_account.Credentials.from_service_account_file( - filename) - kwargs["credentials"] = credentials - return cls(*args, **kwargs) - - from_service_account_json = from_service_account_file - - @property - def transport(self) -> BigtableTableAdminTransport: - """Returns the transport used by the client instance. - - Returns: - BigtableTableAdminTransport: The transport used by the client - instance. - """ - return self._transport - - @staticmethod - def backup_path(project: str,instance: str,cluster: str,backup: str,) -> str: - """Returns a fully-qualified backup string.""" - return "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format(project=project, instance=instance, cluster=cluster, backup=backup, ) - - @staticmethod - def parse_backup_path(path: str) -> Dict[str,str]: - """Parses a backup path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/backups/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def cluster_path(project: str,instance: str,cluster: str,) -> str: - """Returns a fully-qualified cluster string.""" - return "projects/{project}/instances/{instance}/clusters/{cluster}".format(project=project, instance=instance, cluster=cluster, ) - - @staticmethod - def parse_cluster_path(path: str) -> Dict[str,str]: - """Parses a cluster path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def crypto_key_version_path(project: str,location: str,key_ring: str,crypto_key: str,crypto_key_version: str,) -> str: - """Returns a fully-qualified crypto_key_version string.""" - return "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, 
crypto_key_version=crypto_key_version, ) - - @staticmethod - def parse_crypto_key_version_path(path: str) -> Dict[str,str]: - """Parses a crypto_key_version path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)/keyRings/(?P.+?)/cryptoKeys/(?P.+?)/cryptoKeyVersions/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def instance_path(project: str,instance: str,) -> str: - """Returns a fully-qualified instance string.""" - return "projects/{project}/instances/{instance}".format(project=project, instance=instance, ) - - @staticmethod - def parse_instance_path(path: str) -> Dict[str,str]: - """Parses a instance path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def snapshot_path(project: str,instance: str,cluster: str,snapshot: str,) -> str: - """Returns a fully-qualified snapshot string.""" - return "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format(project=project, instance=instance, cluster=cluster, snapshot=snapshot, ) - - @staticmethod - def parse_snapshot_path(path: str) -> Dict[str,str]: - """Parses a snapshot path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/clusters/(?P.+?)/snapshots/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def table_path(project: str,instance: str,table: str,) -> str: - """Returns a fully-qualified table string.""" - return "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) - - @staticmethod - def parse_table_path(path: str) -> Dict[str,str]: - """Parses a table path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/instances/(?P.+?)/tables/(?P
.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_billing_account_path(billing_account: str, ) -> str: - """Returns a fully-qualified billing_account string.""" - return "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - - @staticmethod - def parse_common_billing_account_path(path: str) -> Dict[str,str]: - """Parse a billing_account path into its component segments.""" - m = re.match(r"^billingAccounts/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_folder_path(folder: str, ) -> str: - """Returns a fully-qualified folder string.""" - return "folders/{folder}".format(folder=folder, ) - - @staticmethod - def parse_common_folder_path(path: str) -> Dict[str,str]: - """Parse a folder path into its component segments.""" - m = re.match(r"^folders/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_organization_path(organization: str, ) -> str: - """Returns a fully-qualified organization string.""" - return "organizations/{organization}".format(organization=organization, ) - - @staticmethod - def parse_common_organization_path(path: str) -> Dict[str,str]: - """Parse a organization path into its component segments.""" - m = re.match(r"^organizations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_project_path(project: str, ) -> str: - """Returns a fully-qualified project string.""" - return "projects/{project}".format(project=project, ) - - @staticmethod - def parse_common_project_path(path: str) -> Dict[str,str]: - """Parse a project path into its component segments.""" - m = re.match(r"^projects/(?P.+?)$", path) - return m.groupdict() if m else {} - - @staticmethod - def common_location_path(project: str, location: str, ) -> str: - """Returns a fully-qualified location string.""" - return "projects/{project}/locations/{location}".format(project=project, location=location, ) - - @staticmethod - def 
parse_common_location_path(path: str) -> Dict[str,str]: - """Parse a location path into its component segments.""" - m = re.match(r"^projects/(?P.+?)/locations/(?P.+?)$", path) - return m.groupdict() if m else {} - - @classmethod - def get_mtls_endpoint_and_cert_source(cls, client_options: Optional[client_options_lib.ClientOptions] = None): - """Deprecated. Return the API endpoint and client cert source for mutual TLS. - - The client cert source is determined in the following order: - (1) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is not "true", the - client cert source is None. - (2) if `client_options.client_cert_source` is provided, use the provided one; if the - default client cert source exists, use the default one; otherwise the client cert - source is None. - - The API endpoint is determined in the following order: - (1) if `client_options.api_endpoint` if provided, use the provided one. - (2) if `GOOGLE_API_USE_CLIENT_CERTIFICATE` environment variable is "always", use the - default mTLS endpoint; if the environment variable is "never", use the default API - endpoint; otherwise if client cert source exists, use the default mTLS endpoint, otherwise - use the default API endpoint. - - More details can be found at https://google.aip.dev/auth/4114. - - Args: - client_options (google.api_core.client_options.ClientOptions): Custom options for the - client. Only the `api_endpoint` and `client_cert_source` properties may be used - in this method. - - Returns: - Tuple[str, Callable[[], Tuple[bytes, bytes]]]: returns the API endpoint and the - client cert source to use. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If any errors happen. - """ - - warnings.warn("get_mtls_endpoint_and_cert_source is deprecated. 
Use the api_endpoint property instead.", - DeprecationWarning) - if client_options is None: - client_options = client_options_lib.ClientOptions() - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false") - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - - # Figure out the client cert source to use. - client_cert_source = None - if use_client_cert == "true": - if client_options.client_cert_source: - client_cert_source = client_options.client_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - - # Figure out which api endpoint to use. - if client_options.api_endpoint is not None: - api_endpoint = client_options.api_endpoint - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - api_endpoint = cls.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = cls.DEFAULT_ENDPOINT - - return api_endpoint, client_cert_source - - @staticmethod - def _read_environment_variables(): - """Returns the environment variables used by the client. - - Returns: - Tuple[bool, str, str]: returns the GOOGLE_API_USE_CLIENT_CERTIFICATE, - GOOGLE_API_USE_MTLS_ENDPOINT, and GOOGLE_CLOUD_UNIVERSE_DOMAIN environment variables. - - Raises: - ValueError: If GOOGLE_API_USE_CLIENT_CERTIFICATE is not - any of ["true", "false"]. - google.auth.exceptions.MutualTLSChannelError: If GOOGLE_API_USE_MTLS_ENDPOINT - is not any of ["auto", "never", "always"]. 
- """ - use_client_cert = os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false").lower() - use_mtls_endpoint = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto").lower() - universe_domain_env = os.getenv("GOOGLE_CLOUD_UNIVERSE_DOMAIN") - if use_client_cert not in ("true", "false"): - raise ValueError("Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`") - if use_mtls_endpoint not in ("auto", "never", "always"): - raise MutualTLSChannelError("Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`") - return use_client_cert == "true", use_mtls_endpoint, universe_domain_env - - def _get_client_cert_source(provided_cert_source, use_cert_flag): - """Return the client cert source to be used by the client. - - Args: - provided_cert_source (bytes): The client certificate source provided. - use_cert_flag (bool): A flag indicating whether to use the client certificate. - - Returns: - bytes or None: The client cert source to be used by the client. - """ - client_cert_source = None - if use_cert_flag: - if provided_cert_source: - client_cert_source = provided_cert_source - elif mtls.has_default_client_cert_source(): - client_cert_source = mtls.default_client_cert_source() - return client_cert_source - - def _get_api_endpoint(api_override, client_cert_source, universe_domain, use_mtls_endpoint): - """Return the API endpoint used by the client. - - Args: - api_override (str): The API endpoint override. If specified, this is always - the return value of this function and the other arguments are not used. - client_cert_source (bytes): The client certificate source used by the client. - universe_domain (str): The universe domain used by the client. - use_mtls_endpoint (str): How to use the mTLS endpoint, which depends also on the other parameters. - Possible values are "always", "auto", or "never". - - Returns: - str: The API endpoint to be used by the client. 
- """ - if api_override is not None: - api_endpoint = api_override - elif use_mtls_endpoint == "always" or (use_mtls_endpoint == "auto" and client_cert_source): - _default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE - if universe_domain != _default_universe: - raise MutualTLSChannelError(f"mTLS is not supported in any universe other than {_default_universe}.") - api_endpoint = BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - else: - api_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=universe_domain) - return api_endpoint - - @staticmethod - def _get_universe_domain(client_universe_domain: Optional[str], universe_domain_env: Optional[str]) -> str: - """Return the universe domain used by the client. - - Args: - client_universe_domain (Optional[str]): The universe domain configured via the client options. - universe_domain_env (Optional[str]): The universe domain configured via the "GOOGLE_CLOUD_UNIVERSE_DOMAIN" environment variable. - - Returns: - str: The universe domain to be used by the client. - - Raises: - ValueError: If the universe domain is an empty string. - """ - universe_domain = BigtableTableAdminClient._DEFAULT_UNIVERSE - if client_universe_domain is not None: - universe_domain = client_universe_domain - elif universe_domain_env is not None: - universe_domain = universe_domain_env - if len(universe_domain.strip()) == 0: - raise ValueError("Universe Domain cannot be an empty string.") - return universe_domain - - @staticmethod - def _compare_universes(client_universe: str, - credentials: ga_credentials.Credentials) -> bool: - """Returns True iff the universe domains used by the client and credentials match. - - Args: - client_universe (str): The universe domain configured via the client options. - credentials (ga_credentials.Credentials): The credentials being used in the client. - - Returns: - bool: True iff client_universe matches the universe in credentials. 
- - Raises: - ValueError: when client_universe does not match the universe in credentials. - """ - if credentials: - credentials_universe = credentials.universe_domain - if client_universe != credentials_universe: - default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE - raise ValueError("The configured universe domain " - f"({client_universe}) does not match the universe domain " - f"found in the credentials ({credentials_universe}). " - "If you haven't configured the universe domain explicitly, " - f"`{default_universe}` is the default.") - return True - - def _validate_universe_domain(self): - """Validates client's and credentials' universe domains are consistent. - - Returns: - bool: True iff the configured universe domain is valid. - - Raises: - ValueError: If the configured universe domain is not valid. - """ - self._is_universe_domain_valid = (self._is_universe_domain_valid or - BigtableTableAdminClient._compare_universes(self.universe_domain, self.transport._credentials)) - return self._is_universe_domain_valid - - @property - def api_endpoint(self): - """Return the API endpoint used by the client instance. - - Returns: - str: The API endpoint used by the client instance. - """ - return self._api_endpoint - - @property - def universe_domain(self) -> str: - """Return the universe domain used by the client instance. - - Returns: - str: The universe domain used by the client instance. - """ - return self._universe_domain - - def __init__(self, *, - credentials: Optional[ga_credentials.Credentials] = None, - transport: Optional[Union[str, BigtableTableAdminTransport]] = None, - client_options: Optional[Union[client_options_lib.ClientOptions, dict]] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - ) -> None: - """Instantiates the bigtable table admin client. - - Args: - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. 
These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - transport (Union[str, BigtableTableAdminTransport]): The - transport to use. If set to None, a transport is chosen - automatically. - client_options (Optional[Union[google.api_core.client_options.ClientOptions, dict]]): - Custom options for the client. - - 1. The ``api_endpoint`` property can be used to override the - default endpoint provided by the client when ``transport`` is - not explicitly provided. Only if this property is not set and - ``transport`` was not explicitly provided, the endpoint is - determined by the GOOGLE_API_USE_MTLS_ENDPOINT environment - variable, which have one of the following values: - "always" (always use the default mTLS endpoint), "never" (always - use the default regular endpoint) and "auto" (auto-switch to the - default mTLS endpoint if client certificate is present; this is - the default value). - - 2. If the GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable - is "true", then the ``client_cert_source`` property can be used - to provide a client certificate for mTLS transport. If - not provided, the default SSL client certificate will be used if - present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not - set, no client certificate will be used. - - 3. The ``universe_domain`` property can be used to override the - default "googleapis.com" universe. Note that the ``api_endpoint`` - property still takes precedence; and ``universe_domain`` is - currently not supported for mTLS. - - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. 
- - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - """ - self._client_options = client_options - if isinstance(self._client_options, dict): - self._client_options = client_options_lib.from_dict(self._client_options) - if self._client_options is None: - self._client_options = client_options_lib.ClientOptions() - self._client_options = cast(client_options_lib.ClientOptions, self._client_options) - - universe_domain_opt = getattr(self._client_options, 'universe_domain', None) - - self._use_client_cert, self._use_mtls_endpoint, self._universe_domain_env = BigtableTableAdminClient._read_environment_variables() - self._client_cert_source = BigtableTableAdminClient._get_client_cert_source(self._client_options.client_cert_source, self._use_client_cert) - self._universe_domain = BigtableTableAdminClient._get_universe_domain(universe_domain_opt, self._universe_domain_env) - self._api_endpoint = None # updated below, depending on `transport` - - # Initialize the universe domain validation. - self._is_universe_domain_valid = False - - api_key_value = getattr(self._client_options, "api_key", None) - if api_key_value and credentials: - raise ValueError("client_options.api_key and credentials are mutually exclusive") - - # Save or instantiate the transport. - # Ordinarily, we provide the transport, but allowing a custom transport - # instance provides an extensibility point for unusual situations. - transport_provided = isinstance(transport, BigtableTableAdminTransport) - if transport_provided: - # transport is a BigtableTableAdminTransport instance. - if credentials or self._client_options.credentials_file or api_key_value: - raise ValueError("When providing a transport instance, " - "provide its credentials directly.") - if self._client_options.scopes: - raise ValueError( - "When providing a transport instance, provide its scopes " - "directly." 
- ) - self._transport = cast(BigtableTableAdminTransport, transport) - self._api_endpoint = self._transport.host - - self._api_endpoint = (self._api_endpoint or - BigtableTableAdminClient._get_api_endpoint( - self._client_options.api_endpoint, - self._client_cert_source, - self._universe_domain, - self._use_mtls_endpoint)) - - if not transport_provided: - import google.auth._default # type: ignore - - if api_key_value and hasattr(google.auth._default, "get_api_key_credentials"): - credentials = google.auth._default.get_api_key_credentials(api_key_value) - - Transport = type(self).get_transport_class(cast(str, transport)) - self._transport = Transport( - credentials=credentials, - credentials_file=self._client_options.credentials_file, - host=self._api_endpoint, - scopes=self._client_options.scopes, - client_cert_source_for_mtls=self._client_cert_source, - quota_project_id=self._client_options.quota_project_id, - client_info=client_info, - always_use_jwt_access=True, - api_audience=self._client_options.api_audience, - ) - - def create_table(self, - request: Optional[Union[bigtable_table_admin.CreateTableRequest, dict]] = None, - *, - parent: Optional[str] = None, - table_id: Optional[str] = None, - table: Optional[gba_table.Table] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> gba_table.Table: - r"""Creates a new table in the specified instance. - The table can be created with a full set of initial - column families, specified in the request. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - parent (str): - Required. The unique name of the instance in which to - create the table. Values are of the form - ``projects/{project}/instances/{instance}``. 
- - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table_id (str): - Required. The name by which the new table should be - referred to within the parent instance, e.g., ``foobar`` - rather than ``{parent}/tables/foobar``. Maximum 50 - characters. - - This corresponds to the ``table_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table (google.cloud.bigtable_admin_v2.types.Table): - Required. The Table to create. - This corresponds to the ``table`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, table]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CreateTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.CreateTableRequest): - request = bigtable_table_admin.CreateTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if table_id is not None: - request.table_id = table_id - if table is not None: - request.table = table - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def create_table_from_snapshot(self, - request: Optional[Union[bigtable_table_admin.CreateTableFromSnapshotRequest, dict]] = None, - *, - parent: Optional[str] = None, - table_id: Optional[str] = None, - source_snapshot: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Creates a new table from the specified snapshot. The - target table must not exist. The snapshot and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest, dict]): - The request object. 
Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - parent (str): - Required. The unique name of the instance in which to - create the table. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - table_id (str): - Required. The name by which the new table should be - referred to within the parent instance, e.g., ``foobar`` - rather than ``{parent}/tables/foobar``. - - This corresponds to the ``table_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_snapshot (str): - Required. The unique name of the snapshot from which to - restore the table. The snapshot and the table must be in - the same instance. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``source_snapshot`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. 
- Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, table_id, source_snapshot]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CreateTableFromSnapshotRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.CreateTableFromSnapshotRequest): - request = bigtable_table_admin.CreateTableFromSnapshotRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if table_id is not None: - request.table_id = table_id - if source_snapshot is not None: - request.source_snapshot = source_snapshot - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_table_from_snapshot] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.CreateTableFromSnapshotMetadata, - ) - - # Done; return the response. 
- return response - - def list_tables(self, - request: Optional[Union[bigtable_table_admin.ListTablesRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListTablesPager: - r"""Lists all tables served from a specified instance. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListTablesRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - parent (str): - Required. The unique name of the instance for which - tables should be listed. Values are of the form - ``projects/{project}/instances/{instance}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListTablesPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ListTablesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.ListTablesRequest): - request = bigtable_table_admin.ListTablesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_tables] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListTablesPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def get_table(self, - request: Optional[Union[bigtable_table_admin.GetTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Table: - r"""Gets metadata information about the specified table. 
- - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - name (str): - Required. The unique name of the requested table. Values - are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GetTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.GetTableRequest): - request = bigtable_table_admin.GetTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. 
- rpc = self._transport._wrapped_methods[self._transport.get_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def update_table(self, - request: Optional[Union[bigtable_table_admin.UpdateTableRequest, dict]] = None, - *, - table: Optional[gba_table.Table] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Updates a specified table. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateTableRequest, dict]): - The request object. The request for - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - table (google.cloud.bigtable_admin_v2.types.Table): - Required. The table to update. The table's ``name`` - field is used to identify the table to update. - - This corresponds to the ``table`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The list of fields to update. A mask - specifying which fields (e.g. ``change_stream_config``) - in the ``table`` field should be updated. This mask is - relative to the ``table`` field, not to the request - message. The wildcard (*) path is currently not - supported. 
Currently UpdateTable is only supported for - the following fields: - - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` - - If ``column_families`` is set in ``update_mask``, it - will return an UNIMPLEMENTED error. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([table, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.UpdateTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.UpdateTableRequest): - request = bigtable_table_admin.UpdateTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if table is not None: - request.table = table - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("table.name", request.table.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - gba_table.Table, - metadata_type=bigtable_table_admin.UpdateTableMetadata, - ) - - # Done; return the response. - return response - - def delete_table(self, - request: Optional[Union[bigtable_table_admin.DeleteTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently deletes a specified table and all of its - data. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - name (str): - Required. The unique name of the table to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DeleteTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.DeleteTableRequest): - request = bigtable_table_admin.DeleteTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def undelete_table(self, - request: Optional[Union[bigtable_table_admin.UndeleteTableRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Restores a specified table which was accidentally - deleted. 
- - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UndeleteTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - name (str): - Required. The unique name of the table to be restored. - Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.UndeleteTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.UndeleteTableRequest): - request = bigtable_table_admin.UndeleteTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.undelete_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.UndeleteTableMetadata, - ) - - # Done; return the response. - return response - - def modify_column_families(self, - request: Optional[Union[bigtable_table_admin.ModifyColumnFamiliesRequest, dict]] = None, - *, - name: Optional[str] = None, - modifications: Optional[MutableSequence[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Table: - r"""Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - name (str): - Required. The unique name of the table whose families - should be modified. 
Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): - Required. Modifications to be - atomically applied to the specified - table's families. Entries are applied in - order, meaning that earlier - modifications can be masked by later - ones (in the case of repeated updates to - the same family, for example). - - This corresponds to the ``modifications`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, modifications]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ModifyColumnFamiliesRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, bigtable_table_admin.ModifyColumnFamiliesRequest): - request = bigtable_table_admin.ModifyColumnFamiliesRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if modifications is not None: - request.modifications = modifications - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.modify_column_families] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def drop_row_range(self, - request: Optional[Union[bigtable_table_admin.DropRowRangeRequest, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently drop/delete a row range from a specified - table. The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DropRowRangeRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. 
- # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DropRowRangeRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.DropRowRangeRequest): - request = bigtable_table_admin.DropRowRangeRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.drop_row_range] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def generate_consistency_token(self, - request: Optional[Union[bigtable_table_admin.GenerateConsistencyTokenRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - r"""Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - name (str): - Required. The unique name of the Table for which to - create a consistency token. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. 
- - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.GenerateConsistencyTokenResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GenerateConsistencyTokenRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.GenerateConsistencyTokenRequest): - request = bigtable_table_admin.GenerateConsistencyTokenRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.generate_consistency_token] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. 
- self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def check_consistency(self, - request: Optional[Union[bigtable_table_admin.CheckConsistencyRequest, dict]] = None, - *, - name: Optional[str] = None, - consistency_token: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> bigtable_table_admin.CheckConsistencyResponse: - r"""Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CheckConsistencyRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - name (str): - Required. The unique name of the Table for which to - check replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - consistency_token (str): - Required. The token created using - GenerateConsistencyToken for the Table. - - This corresponds to the ``consistency_token`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.cloud.bigtable_admin_v2.types.CheckConsistencyResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name, consistency_token]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CheckConsistencyRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.CheckConsistencyRequest): - request = bigtable_table_admin.CheckConsistencyRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if consistency_token is not None: - request.consistency_token = consistency_token - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.check_consistency] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def snapshot_table(self, - request: Optional[Union[bigtable_table_admin.SnapshotTableRequest, dict]] = None, - *, - name: Optional[str] = None, - cluster: Optional[str] = None, - snapshot_id: Optional[str] = None, - description: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Creates a new snapshot in the specified cluster from - the specified source table. The cluster and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.SnapshotTableRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (str): - Required. The unique name of the table to have the - snapshot taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - cluster (str): - Required. The name of the cluster where the snapshot - will be created in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
- - This corresponds to the ``cluster`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - snapshot_id (str): - Required. The ID by which the new snapshot should be - referred to within the parent cluster, e.g., - ``mysnapshot`` of the form: - ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - - This corresponds to the ``snapshot_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - description (str): - Description of the snapshot. - This corresponds to the ``description`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Snapshot` A snapshot of a table at a particular time. A snapshot can be used as a - checkpoint for data restoration or a data source for - a new table. - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([name, cluster, snapshot_id, description]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.SnapshotTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.SnapshotTableRequest): - request = bigtable_table_admin.SnapshotTableRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - if cluster is not None: - request.cluster = cluster - if snapshot_id is not None: - request.snapshot_id = snapshot_id - if description is not None: - request.description = description - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.snapshot_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Snapshot, - metadata_type=bigtable_table_admin.SnapshotTableMetadata, - ) - - # Done; return the response. 
- return response - - def get_snapshot(self, - request: Optional[Union[bigtable_table_admin.GetSnapshotRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Snapshot: - r"""Gets metadata information about the specified - snapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetSnapshotRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (str): - Required. The unique name of the requested snapshot. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Snapshot: - A snapshot of a table at a particular - time. 
A snapshot can be used as a - checkpoint for data restoration or a - data source for a new table. - - Note: This is a private alpha release of - Cloud Bigtable snapshots. This feature - is not currently available to most Cloud - Bigtable customers. This feature might - be changed in backward-incompatible ways - and is not recommended for production - use. It is not subject to any SLA or - deprecation policy. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GetSnapshotRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.GetSnapshotRequest): - request = bigtable_table_admin.GetSnapshotRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_snapshot] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def list_snapshots(self, - request: Optional[Union[bigtable_table_admin.ListSnapshotsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListSnapshotsPager: - r"""Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - parent (str): - Required. The unique name of the cluster for which - snapshots should be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list snapshots for all - clusters in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListSnapshotsPager: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud - Bigtable snapshots. This feature is not currently - available to most Cloud Bigtable customers. This - feature might be changed in backward-incompatible - ways and is not recommended for production use. It is - not subject to any SLA or deprecation policy. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ListSnapshotsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.ListSnapshotsRequest): - request = bigtable_table_admin.ListSnapshotsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_snapshots] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListSnapshotsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_snapshot(self, - request: Optional[Union[bigtable_table_admin.DeleteSnapshotRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteSnapshotRequest, dict]): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - name (str): - Required. The unique name of the snapshot to be deleted. 
- Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DeleteSnapshotRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.DeleteSnapshotRequest): - request = bigtable_table_admin.DeleteSnapshotRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_snapshot] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. 
- rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def create_backup(self, - request: Optional[Union[bigtable_table_admin.CreateBackupRequest, dict]] = None, - *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - backup: Optional[table.Backup] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CreateBackupRequest, dict]): - The request object. The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - parent (str): - Required. This must be one of the clusters in the - instance in which this table is located. The backup will - be stored in this cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (str): - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are - combined as {parent}/backups/{backup_id} to create the - full backup name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. 
- This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to create. - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, backup_id, backup]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CreateBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.CreateBackupRequest): - request = bigtable_table_admin.CreateBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. 
- if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if backup is not None: - request.backup = backup - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.create_backup] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CreateBackupMetadata, - ) - - # Done; return the response. - return response - - def get_backup(self, - request: Optional[Union[bigtable_table_admin.GetBackupRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Gets metadata on a pending or completed Cloud - Bigtable Backup. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.GetBackupRequest, dict]): - The request object. The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - name (str): - Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. 
- metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.GetBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.GetBackupRequest): - request = bigtable_table_admin.GetBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_backup] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def update_backup(self, - request: Optional[Union[bigtable_table_admin.UpdateBackupRequest, dict]] = None, - *, - backup: Optional[table.Backup] = None, - update_mask: Optional[field_mask_pb2.FieldMask] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> table.Backup: - r"""Updates a pending or completed Cloud Bigtable Backup. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.UpdateBackupRequest, dict]): - The request object. The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only - supported for the following fields: - - - ``backup.expire_time``. - - This corresponds to the ``backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be - updated. This mask is relative to the Backup resource, - not to the request message. The field mask must always - be specified; this prevents any future fields from being - erased accidentally by clients that do not know about - them. - - This corresponds to the ``update_mask`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.types.Backup: - A backup of a Cloud Bigtable table. 
- """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([backup, update_mask]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.UpdateBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.UpdateBackupRequest): - request = bigtable_table_admin.UpdateBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if backup is not None: - request.backup = backup - if update_mask is not None: - request.update_mask = update_mask - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.update_backup] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("backup.name", request.backup.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def delete_backup(self, - request: Optional[Union[bigtable_table_admin.DeleteBackupRequest, dict]] = None, - *, - name: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> None: - r"""Deletes a pending or completed Cloud Bigtable backup. 
- - Args: - request (Union[google.cloud.bigtable_admin_v2.types.DeleteBackupRequest, dict]): - The request object. The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - name (str): - Required. Name of the backup to delete. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - - This corresponds to the ``name`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([name]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.DeleteBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.DeleteBackupRequest): - request = bigtable_table_admin.DeleteBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if name is not None: - request.name = name - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.delete_backup] - - # Certain fields should be provided within the metadata header; - # add these here. 
- metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("name", request.name), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - def list_backups(self, - request: Optional[Union[bigtable_table_admin.ListBackupsRequest, dict]] = None, - *, - parent: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> pagers.ListBackupsPager: - r"""Lists Cloud Bigtable backups. Returns both completed - and pending backups. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.ListBackupsRequest, dict]): - The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - parent (str): - Required. The cluster to list backups from. Values are - of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list backups for all clusters - in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.cloud.bigtable_admin_v2.services.bigtable_table_admin.pagers.ListBackupsPager: - The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Iterating over this object will yield results and - resolve additional pages automatically. - - """ - # Create or coerce a protobuf request object. 
- # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.ListBackupsRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.ListBackupsRequest): - request = bigtable_table_admin.ListBackupsRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.list_backups] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # This method is paged; wrap the response in a pager, which provides - # an `__iter__` convenience method. - response = pagers.ListBackupsPager( - method=rpc, - request=request, - response=response, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def restore_table(self, - request: Optional[Union[bigtable_table_admin.RestoreTableRequest, dict]] = None, - *, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Create a new table by restoring from a completed backup. The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. - The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.RestoreTableRequest, dict]): - The request object. The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be :class:`google.cloud.bigtable_admin_v2.types.Table` A collection of user data indexed by row, column, and timestamp. - Each table is served using the resources of its - parent cluster. - - """ - # Create or coerce a protobuf request object. - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.RestoreTableRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. 
- if not isinstance(request, bigtable_table_admin.RestoreTableRequest): - request = bigtable_table_admin.RestoreTableRequest(request) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.restore_table] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. - response = operation.from_gapic( - response, - self._transport.operations_client, - table.Table, - metadata_type=bigtable_table_admin.RestoreTableMetadata, - ) - - # Done; return the response. - return response - - def copy_backup(self, - request: Optional[Union[bigtable_table_admin.CopyBackupRequest, dict]] = None, - *, - parent: Optional[str] = None, - backup_id: Optional[str] = None, - source_backup: Optional[str] = None, - expire_time: Optional[timestamp_pb2.Timestamp] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> operation.Operation: - r"""Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. - - Args: - request (Union[google.cloud.bigtable_admin_v2.types.CopyBackupRequest, dict]): - The request object. The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - parent (str): - Required. The name of the destination cluster that will - contain the backup copy. The cluster must already - exists. Values are of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
- - This corresponds to the ``parent`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - backup_id (str): - Required. The id of the new backup. The ``backup_id`` - along with ``parent`` are combined as - {parent}/backups/{backup_id} to create the full backup - name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in - length and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - - This corresponds to the ``backup_id`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - source_backup (str): - Required. The source backup to be copied from. The - source backup needs to be in READY state for it to be - copied. Copying a copied backup is not allowed. Once - CopyBackup is in progress, the source backup cannot be - deleted or cleaned up on expiration until CopyBackup is - finished. Values are of the form: - ``projects//instances//clusters//backups/``. - - This corresponds to the ``source_backup`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. Required. The expiration time of the copied - backup with microsecond granularity that must be at - least 6 hours and at most 30 days from the time the - request is received. Once the ``expire_time`` has - passed, Cloud Bigtable will delete the backup and free - the resources used by the backup. - - This corresponds to the ``expire_time`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - google.api_core.operation.Operation: - An object representing a long-running operation. - - The result type for the operation will be - :class:`google.cloud.bigtable_admin_v2.types.Backup` A - backup of a Cloud Bigtable table. - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([parent, backup_id, source_backup, expire_time]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - # Minor optimization to avoid making a copy if the user passes - # in a bigtable_table_admin.CopyBackupRequest. - # There's no risk of modifying the input as we've already verified - # there are no flattened fields. - if not isinstance(request, bigtable_table_admin.CopyBackupRequest): - request = bigtable_table_admin.CopyBackupRequest(request) - # If we have keyword arguments corresponding to fields on the - # request, apply these. - if parent is not None: - request.parent = parent - if backup_id is not None: - request.backup_id = backup_id - if source_backup is not None: - request.source_backup = source_backup - if expire_time is not None: - request.expire_time = expire_time - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.copy_backup] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("parent", request.parent), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Wrap the response in an operation future. 
- response = operation.from_gapic( - response, - self._transport.operations_client, - table.Backup, - metadata_type=bigtable_table_admin.CopyBackupMetadata, - ) - - # Done; return the response. - return response - - def get_iam_policy(self, - request: Optional[Union[iam_policy_pb2.GetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Gets the access control policy for a Table or Backup - resource. Returns an empty policy if the resource exists - but does not have a policy set. - - Args: - request (Union[google.iam.v1.iam_policy_pb2.GetIamPolicyRequest, dict]): - The request object. Request message for ``GetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being requested. See the - operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. 
- - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). - - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. 
- has_flattened_params = any([resource]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.GetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.GetIamPolicyRequest() - if resource is not None: - request.resource = resource - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.get_iam_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("resource", request.resource), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def set_iam_policy(self, - request: Optional[Union[iam_policy_pb2.SetIamPolicyRequest, dict]] = None, - *, - resource: Optional[str] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> policy_pb2.Policy: - r"""Sets the access control policy on a Table or Backup - resource. Replaces any existing policy. - - Args: - request (Union[google.iam.v1.iam_policy_pb2.SetIamPolicyRequest, dict]): - The request object. Request message for ``SetIamPolicy`` method. - resource (str): - REQUIRED: The resource for which the - policy is being specified. See the - operation documentation for the - appropriate value for this field. 
- - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which specifies access - controls for Google Cloud resources. - - A Policy is a collection of bindings. A binding binds - one or more members, or principals, to a single role. - Principals can be user accounts, service accounts, - Google groups, and domains (such as G Suite). A role - is a named list of permissions; each role can be an - IAM predefined role or a user-created custom role. - - For some types of Google Cloud resources, a binding - can also specify a condition, which is a logical - expression that allows access to a resource only if - the expression evaluates to true. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the [IAM - documentation](\ https://cloud.google.com/iam/help/conditions/resource-policies). 
- - **JSON example:** - - :literal:`\` { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:mike@example.com", "group:admins@example.com", "domain:google.com", "serviceAccount:my-project-id@appspot.gserviceaccount.com" ] }, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:eve@example.com" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", } } ], "etag": "BwWWja0YfJA=", "version": 3 }`\ \` - - **YAML example:** - - :literal:`\` bindings: - members: - user:mike@example.com - group:admins@example.com - domain:google.com - serviceAccount:my-project-id@appspot.gserviceaccount.com role: roles/resourcemanager.organizationAdmin - members: - user:eve@example.com role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') etag: BwWWja0YfJA= version: 3`\ \` - - For a description of IAM and its features, see the - [IAM - documentation](\ https://cloud.google.com/iam/docs/). - - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.SetIamPolicyRequest(**request) - elif not request: - # Null request, just make one. 
- request = iam_policy_pb2.SetIamPolicyRequest() - if resource is not None: - request.resource = resource - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.set_iam_policy] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("resource", request.resource), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. - return response - - def test_iam_permissions(self, - request: Optional[Union[iam_policy_pb2.TestIamPermissionsRequest, dict]] = None, - *, - resource: Optional[str] = None, - permissions: Optional[MutableSequence[str]] = None, - retry: OptionalRetry = gapic_v1.method.DEFAULT, - timeout: Union[float, object] = gapic_v1.method.DEFAULT, - metadata: Sequence[Tuple[str, str]] = (), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Returns permissions that the caller has on the - specified Table or Backup resource. - - Args: - request (Union[google.iam.v1.iam_policy_pb2.TestIamPermissionsRequest, dict]): - The request object. Request message for ``TestIamPermissions`` method. - resource (str): - REQUIRED: The resource for which the - policy detail is being requested. See - the operation documentation for the - appropriate value for this field. - - This corresponds to the ``resource`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - permissions (MutableSequence[str]): - The set of permissions to check for the ``resource``. - Permissions with wildcards (such as '*' or 'storage.*') - are not allowed. For more information see `IAM - Overview `__. 
- - This corresponds to the ``permissions`` field - on the ``request`` instance; if ``request`` is provided, this - should not be set. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - google.iam.v1.iam_policy_pb2.TestIamPermissionsResponse: - Response message for TestIamPermissions method. - """ - # Create or coerce a protobuf request object. - # Quick check: If we got a request object, we should *not* have - # gotten any keyword arguments that map to the request. - has_flattened_params = any([resource, permissions]) - if request is not None and has_flattened_params: - raise ValueError('If the `request` argument is set, then none of ' - 'the individual field arguments should be set.') - - if isinstance(request, dict): - # The request isn't a proto-plus wrapped type, - # so it must be constructed via keyword expansion. - request = iam_policy_pb2.TestIamPermissionsRequest(**request) - elif not request: - # Null request, just make one. - request = iam_policy_pb2.TestIamPermissionsRequest() - if resource is not None: - request.resource = resource - if permissions: - request.permissions.extend(permissions) - - # Wrap the RPC method; this adds retry and timeout information, - # and friendly error handling. - rpc = self._transport._wrapped_methods[self._transport.test_iam_permissions] - - # Certain fields should be provided within the metadata header; - # add these here. - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ("resource", request.resource), - )), - ) - - # Validate the universe domain. - self._validate_universe_domain() - - # Send the request. - response = rpc( - request, - retry=retry, - timeout=timeout, - metadata=metadata, - ) - - # Done; return the response. 
- return response - - def __enter__(self) -> "BigtableTableAdminClient": - return self - - def __exit__(self, type, value, traceback): - """Releases underlying transport's resources. - - .. warning:: - ONLY use as a context manager if the transport is NOT shared - with other clients! Exiting the with block will CLOSE the transport - and may cause errors in other clients! - """ - self.transport.close() - - - - - - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -__all__ = ( - "BigtableTableAdminClient", -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py deleted file mode 100644 index adf3d6e23..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/pagers.py +++ /dev/null @@ -1,382 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from typing import Any, AsyncIterator, Awaitable, Callable, Sequence, Tuple, Optional, Iterator - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table - - -class ListTablesPager: - """A pager for iterating through ``list_tables`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and - provides an ``__iter__`` method to iterate through its - ``tables`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListTables`` requests and continue to iterate - through the ``tables`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., bigtable_table_admin.ListTablesResponse], - request: bigtable_table_admin.ListTablesRequest, - response: bigtable_table_admin.ListTablesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = bigtable_table_admin.ListTablesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_table_admin.ListTablesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[table.Table]: - for page in self.pages: - yield from page.tables - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListTablesAsyncPager: - """A pager for iterating through ``list_tables`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``tables`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListTables`` requests and continue to iterate - through the ``tables`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListTablesResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[bigtable_table_admin.ListTablesResponse]], - request: bigtable_table_admin.ListTablesRequest, - response: bigtable_table_admin.ListTablesResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListTablesRequest): - The initial request object. 
- response (google.cloud.bigtable_admin_v2.types.ListTablesResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = bigtable_table_admin.ListTablesRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[bigtable_table_admin.ListTablesResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[table.Table]: - async def async_generator(): - async for page in self.pages: - for response in page.tables: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListSnapshotsPager: - """A pager for iterating through ``list_snapshots`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``snapshots`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListSnapshots`` requests and continue to iterate - through the ``snapshots`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. 
- """ - def __init__(self, - method: Callable[..., bigtable_table_admin.ListSnapshotsResponse], - request: bigtable_table_admin.ListSnapshotsRequest, - response: bigtable_table_admin.ListSnapshotsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = bigtable_table_admin.ListSnapshotsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_table_admin.ListSnapshotsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[table.Snapshot]: - for page in self.pages: - yield from page.snapshots - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListSnapshotsAsyncPager: - """A pager for iterating through ``list_snapshots`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``snapshots`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListSnapshots`` requests and continue to iterate - through the ``snapshots`` field on the - corresponding responses. 
- - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[bigtable_table_admin.ListSnapshotsResponse]], - request: bigtable_table_admin.ListSnapshotsRequest, - response: bigtable_table_admin.ListSnapshotsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListSnapshotsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListSnapshotsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = bigtable_table_admin.ListSnapshotsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[bigtable_table_admin.ListSnapshotsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[table.Snapshot]: - async def async_generator(): - async for page in self.pages: - for response in page.snapshots: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBackupsPager: - """A pager for iterating through ``list_backups`` requests. 
- - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and - provides an ``__iter__`` method to iterate through its - ``backups`` field. - - If there are more pages, the ``__iter__`` method will make additional - ``ListBackups`` requests and continue to iterate - through the ``backups`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., bigtable_table_admin.ListBackupsResponse], - request: bigtable_table_admin.ListBackupsRequest, - response: bigtable_table_admin.ListBackupsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiate the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): - The initial request object. - response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - self._method = method - self._request = bigtable_table_admin.ListBackupsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - def pages(self) -> Iterator[bigtable_table_admin.ListBackupsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = self._method(self._request, metadata=self._metadata) - yield self._response - - def __iter__(self) -> Iterator[table.Backup]: - for page in self.pages: - yield from page.backups - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) - - -class ListBackupsAsyncPager: - """A pager for iterating through ``list_backups`` requests. - - This class thinly wraps an initial - :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` object, and - provides an ``__aiter__`` method to iterate through its - ``backups`` field. - - If there are more pages, the ``__aiter__`` method will make additional - ``ListBackups`` requests and continue to iterate - through the ``backups`` field on the - corresponding responses. - - All the usual :class:`google.cloud.bigtable_admin_v2.types.ListBackupsResponse` - attributes are available on the pager. If multiple requests are made, only - the most recent response is retained, and thus used for attribute lookup. - """ - def __init__(self, - method: Callable[..., Awaitable[bigtable_table_admin.ListBackupsResponse]], - request: bigtable_table_admin.ListBackupsRequest, - response: bigtable_table_admin.ListBackupsResponse, - *, - metadata: Sequence[Tuple[str, str]] = ()): - """Instantiates the pager. - - Args: - method (Callable): The method that was originally called, and - which instantiated this pager. - request (google.cloud.bigtable_admin_v2.types.ListBackupsRequest): - The initial request object. 
- response (google.cloud.bigtable_admin_v2.types.ListBackupsResponse): - The initial response object. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - self._method = method - self._request = bigtable_table_admin.ListBackupsRequest(request) - self._response = response - self._metadata = metadata - - def __getattr__(self, name: str) -> Any: - return getattr(self._response, name) - - @property - async def pages(self) -> AsyncIterator[bigtable_table_admin.ListBackupsResponse]: - yield self._response - while self._response.next_page_token: - self._request.page_token = self._response.next_page_token - self._response = await self._method(self._request, metadata=self._metadata) - yield self._response - def __aiter__(self) -> AsyncIterator[table.Backup]: - async def async_generator(): - async for page in self.pages: - for response in page.backups: - yield response - - return async_generator() - - def __repr__(self) -> str: - return '{0}<{1!r}>'.format(self.__class__.__name__, self._response) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py deleted file mode 100644 index e2256b316..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/__init__.py +++ /dev/null @@ -1,38 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from collections import OrderedDict -from typing import Dict, Type - -from .base import BigtableTableAdminTransport -from .grpc import BigtableTableAdminGrpcTransport -from .grpc_asyncio import BigtableTableAdminGrpcAsyncIOTransport -from .rest import BigtableTableAdminRestTransport -from .rest import BigtableTableAdminRestInterceptor - - -# Compile a registry of transports. -_transport_registry = OrderedDict() # type: Dict[str, Type[BigtableTableAdminTransport]] -_transport_registry['grpc'] = BigtableTableAdminGrpcTransport -_transport_registry['grpc_asyncio'] = BigtableTableAdminGrpcAsyncIOTransport -_transport_registry['rest'] = BigtableTableAdminRestTransport - -__all__ = ( - 'BigtableTableAdminTransport', - 'BigtableTableAdminGrpcTransport', - 'BigtableTableAdminGrpcAsyncIOTransport', - 'BigtableTableAdminRestTransport', - 'BigtableTableAdminRestInterceptor', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py deleted file mode 100644 index 7856a17bc..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/base.py +++ /dev/null @@ -1,575 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import abc -from typing import Awaitable, Callable, Dict, Optional, Sequence, Union - -from google.cloud.bigtable_admin_v2 import gapic_version as package_version - -import google.auth # type: ignore -import google.api_core -from google.api_core import exceptions as core_exceptions -from google.api_core import gapic_v1 -from google.api_core import retry as retries -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.oauth2 import service_account # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(gapic_version=package_version.__version__) - - -class BigtableTableAdminTransport(abc.ABC): - """Abstract transport class for BigtableTableAdmin.""" - - AUTH_SCOPES = ( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', - ) - - DEFAULT_HOST: str = 
'bigtableadmin.googleapis.com' - def __init__( - self, *, - host: str = DEFAULT_HOST, - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - **kwargs, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A list of scopes. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - """ - - scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES} - - # Save the scopes. - self._scopes = scopes - - # If no credentials are provided, then determine the appropriate - # defaults. 
- if credentials and credentials_file: - raise core_exceptions.DuplicateCredentialArgs("'credentials_file' and 'credentials' are mutually exclusive") - - if credentials_file is not None: - credentials, _ = google.auth.load_credentials_from_file( - credentials_file, - **scopes_kwargs, - quota_project_id=quota_project_id - ) - elif credentials is None: - credentials, _ = google.auth.default(**scopes_kwargs, quota_project_id=quota_project_id) - # Don't apply audience if the credentials file passed from user. - if hasattr(credentials, "with_gdch_audience"): - credentials = credentials.with_gdch_audience(api_audience if api_audience else host) - - # If the credentials are service account credentials, then always try to use self signed JWT. - if always_use_jwt_access and isinstance(credentials, service_account.Credentials) and hasattr(service_account.Credentials, "with_always_use_jwt_access"): - credentials = credentials.with_always_use_jwt_access(True) - - # Save the credentials. - self._credentials = credentials - - # Save the hostname. Default to port 443 (HTTPS) if none is specified. - if ':' not in host: - host += ':443' - self._host = host - - @property - def host(self): - return self._host - - def _prep_wrapped_messages(self, client_info): - # Precompute the wrapped methods. 
- self._wrapped_methods = { - self.create_table: gapic_v1.method.wrap_method( - self.create_table, - default_timeout=300.0, - client_info=client_info, - ), - self.create_table_from_snapshot: gapic_v1.method.wrap_method( - self.create_table_from_snapshot, - default_timeout=None, - client_info=client_info, - ), - self.list_tables: gapic_v1.method.wrap_method( - self.list_tables, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.get_table: gapic_v1.method.wrap_method( - self.get_table, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_table: gapic_v1.method.wrap_method( - self.update_table, - default_timeout=None, - client_info=client_info, - ), - self.delete_table: gapic_v1.method.wrap_method( - self.delete_table, - default_timeout=60.0, - client_info=client_info, - ), - self.undelete_table: gapic_v1.method.wrap_method( - self.undelete_table, - default_timeout=None, - client_info=client_info, - ), - self.modify_column_families: gapic_v1.method.wrap_method( - self.modify_column_families, - default_timeout=300.0, - client_info=client_info, - ), - self.drop_row_range: gapic_v1.method.wrap_method( - self.drop_row_range, - default_timeout=3600.0, - client_info=client_info, - ), - self.generate_consistency_token: gapic_v1.method.wrap_method( - self.generate_consistency_token, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, 
- ), - self.check_consistency: gapic_v1.method.wrap_method( - self.check_consistency, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.snapshot_table: gapic_v1.method.wrap_method( - self.snapshot_table, - default_timeout=None, - client_info=client_info, - ), - self.get_snapshot: gapic_v1.method.wrap_method( - self.get_snapshot, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.list_snapshots: gapic_v1.method.wrap_method( - self.list_snapshots, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.delete_snapshot: gapic_v1.method.wrap_method( - self.delete_snapshot, - default_timeout=60.0, - client_info=client_info, - ), - self.create_backup: gapic_v1.method.wrap_method( - self.create_backup, - default_timeout=60.0, - client_info=client_info, - ), - self.get_backup: gapic_v1.method.wrap_method( - self.get_backup, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.update_backup: gapic_v1.method.wrap_method( - self.update_backup, - default_timeout=60.0, - client_info=client_info, - ), - self.delete_backup: gapic_v1.method.wrap_method( - self.delete_backup, - default_timeout=60.0, - 
client_info=client_info, - ), - self.list_backups: gapic_v1.method.wrap_method( - self.list_backups, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.restore_table: gapic_v1.method.wrap_method( - self.restore_table, - default_timeout=60.0, - client_info=client_info, - ), - self.copy_backup: gapic_v1.method.wrap_method( - self.copy_backup, - default_timeout=None, - client_info=client_info, - ), - self.get_iam_policy: gapic_v1.method.wrap_method( - self.get_iam_policy, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - self.set_iam_policy: gapic_v1.method.wrap_method( - self.set_iam_policy, - default_timeout=60.0, - client_info=client_info, - ), - self.test_iam_permissions: gapic_v1.method.wrap_method( - self.test_iam_permissions, - default_retry=retries.Retry( -initial=1.0,maximum=60.0,multiplier=2, predicate=retries.if_exception_type( - core_exceptions.DeadlineExceeded, - core_exceptions.ServiceUnavailable, - ), - deadline=60.0, - ), - default_timeout=60.0, - client_info=client_info, - ), - } - - def close(self): - """Closes resources associated with the transport. - - .. warning:: - Only call this method if the transport is NOT shared - with other clients - this may cause errors in other clients! 
- """ - raise NotImplementedError() - - @property - def operations_client(self): - """Return the client designed to process long-running operations.""" - raise NotImplementedError() - - @property - def create_table(self) -> Callable[ - [bigtable_table_admin.CreateTableRequest], - Union[ - gba_table.Table, - Awaitable[gba_table.Table] - ]]: - raise NotImplementedError() - - @property - def create_table_from_snapshot(self) -> Callable[ - [bigtable_table_admin.CreateTableFromSnapshotRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def list_tables(self) -> Callable[ - [bigtable_table_admin.ListTablesRequest], - Union[ - bigtable_table_admin.ListTablesResponse, - Awaitable[bigtable_table_admin.ListTablesResponse] - ]]: - raise NotImplementedError() - - @property - def get_table(self) -> Callable[ - [bigtable_table_admin.GetTableRequest], - Union[ - table.Table, - Awaitable[table.Table] - ]]: - raise NotImplementedError() - - @property - def update_table(self) -> Callable[ - [bigtable_table_admin.UpdateTableRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def delete_table(self) -> Callable[ - [bigtable_table_admin.DeleteTableRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def undelete_table(self) -> Callable[ - [bigtable_table_admin.UndeleteTableRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def modify_column_families(self) -> Callable[ - [bigtable_table_admin.ModifyColumnFamiliesRequest], - Union[ - table.Table, - Awaitable[table.Table] - ]]: - raise NotImplementedError() - - @property - def drop_row_range(self) -> Callable[ - [bigtable_table_admin.DropRowRangeRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise 
NotImplementedError() - - @property - def generate_consistency_token(self) -> Callable[ - [bigtable_table_admin.GenerateConsistencyTokenRequest], - Union[ - bigtable_table_admin.GenerateConsistencyTokenResponse, - Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse] - ]]: - raise NotImplementedError() - - @property - def check_consistency(self) -> Callable[ - [bigtable_table_admin.CheckConsistencyRequest], - Union[ - bigtable_table_admin.CheckConsistencyResponse, - Awaitable[bigtable_table_admin.CheckConsistencyResponse] - ]]: - raise NotImplementedError() - - @property - def snapshot_table(self) -> Callable[ - [bigtable_table_admin.SnapshotTableRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_snapshot(self) -> Callable[ - [bigtable_table_admin.GetSnapshotRequest], - Union[ - table.Snapshot, - Awaitable[table.Snapshot] - ]]: - raise NotImplementedError() - - @property - def list_snapshots(self) -> Callable[ - [bigtable_table_admin.ListSnapshotsRequest], - Union[ - bigtable_table_admin.ListSnapshotsResponse, - Awaitable[bigtable_table_admin.ListSnapshotsResponse] - ]]: - raise NotImplementedError() - - @property - def delete_snapshot(self) -> Callable[ - [bigtable_table_admin.DeleteSnapshotRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def create_backup(self) -> Callable[ - [bigtable_table_admin.CreateBackupRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_backup(self) -> Callable[ - [bigtable_table_admin.GetBackupRequest], - Union[ - table.Backup, - Awaitable[table.Backup] - ]]: - raise NotImplementedError() - - @property - def update_backup(self) -> Callable[ - [bigtable_table_admin.UpdateBackupRequest], - Union[ - table.Backup, - Awaitable[table.Backup] - ]]: - raise NotImplementedError() - - 
@property - def delete_backup(self) -> Callable[ - [bigtable_table_admin.DeleteBackupRequest], - Union[ - empty_pb2.Empty, - Awaitable[empty_pb2.Empty] - ]]: - raise NotImplementedError() - - @property - def list_backups(self) -> Callable[ - [bigtable_table_admin.ListBackupsRequest], - Union[ - bigtable_table_admin.ListBackupsResponse, - Awaitable[bigtable_table_admin.ListBackupsResponse] - ]]: - raise NotImplementedError() - - @property - def restore_table(self) -> Callable[ - [bigtable_table_admin.RestoreTableRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def copy_backup(self) -> Callable[ - [bigtable_table_admin.CopyBackupRequest], - Union[ - operations_pb2.Operation, - Awaitable[operations_pb2.Operation] - ]]: - raise NotImplementedError() - - @property - def get_iam_policy(self) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - Union[ - policy_pb2.Policy, - Awaitable[policy_pb2.Policy] - ]]: - raise NotImplementedError() - - @property - def set_iam_policy(self) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - Union[ - policy_pb2.Policy, - Awaitable[policy_pb2.Policy] - ]]: - raise NotImplementedError() - - @property - def test_iam_permissions(self) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - Union[ - iam_policy_pb2.TestIamPermissionsResponse, - Awaitable[iam_policy_pb2.TestIamPermissionsResponse] - ]]: - raise NotImplementedError() - - @property - def kind(self) -> str: - raise NotImplementedError() - - -__all__ = ( - 'BigtableTableAdminTransport', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py deleted file mode 100644 index 80b6f4d65..000000000 --- 
a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc.py +++ /dev/null @@ -1,996 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import warnings -from typing import Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import grpc_helpers -from google.api_core import operations_v1 -from google.api_core import gapic_v1 -import google.auth # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO - - -class BigtableTableAdminGrpcTransport(BigtableTableAdminTransport): - """gRPC backend transport for BigtableTableAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. 
- - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. - """ - _stubs: Dict[str, Callable] - - def __init__(self, *, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[grpc.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - channel (Optional[grpc.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint. 
- If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. 
- credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @classmethod - def create_channel(cls, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> grpc.Channel: - """Create and return a gRPC channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is mutually exclusive with credentials. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - grpc.Channel: A gRPC channel object. - - Raises: - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - - return grpc_helpers.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - @property - def grpc_channel(self) -> grpc.Channel: - """Return the channel designed to connect to this service. - """ - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_table(self) -> Callable[ - [bigtable_table_admin.CreateTableRequest], - gba_table.Table]: - r"""Return a callable for the create table method over gRPC. - - Creates a new table in the specified instance. - The table can be created with a full set of initial - column families, specified in the request. - - Returns: - Callable[[~.CreateTableRequest], - ~.Table]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_table' not in self._stubs: - self._stubs['create_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', - request_serializer=bigtable_table_admin.CreateTableRequest.serialize, - response_deserializer=gba_table.Table.deserialize, - ) - return self._stubs['create_table'] - - @property - def create_table_from_snapshot(self) -> Callable[ - [bigtable_table_admin.CreateTableFromSnapshotRequest], - operations_pb2.Operation]: - r"""Return a callable for the create table from snapshot method over gRPC. - - Creates a new table from the specified snapshot. The - target table must not exist. The snapshot and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.CreateTableFromSnapshotRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_table_from_snapshot' not in self._stubs: - self._stubs['create_table_from_snapshot'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot', - request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_table_from_snapshot'] - - @property - def list_tables(self) -> Callable[ - [bigtable_table_admin.ListTablesRequest], - bigtable_table_admin.ListTablesResponse]: - r"""Return a callable for the list tables method over gRPC. 
- - Lists all tables served from a specified instance. - - Returns: - Callable[[~.ListTablesRequest], - ~.ListTablesResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tables' not in self._stubs: - self._stubs['list_tables'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', - request_serializer=bigtable_table_admin.ListTablesRequest.serialize, - response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, - ) - return self._stubs['list_tables'] - - @property - def get_table(self) -> Callable[ - [bigtable_table_admin.GetTableRequest], - table.Table]: - r"""Return a callable for the get table method over gRPC. - - Gets metadata information about the specified table. - - Returns: - Callable[[~.GetTableRequest], - ~.Table]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_table' not in self._stubs: - self._stubs['get_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', - request_serializer=bigtable_table_admin.GetTableRequest.serialize, - response_deserializer=table.Table.deserialize, - ) - return self._stubs['get_table'] - - @property - def update_table(self) -> Callable[ - [bigtable_table_admin.UpdateTableRequest], - operations_pb2.Operation]: - r"""Return a callable for the update table method over gRPC. - - Updates a specified table. - - Returns: - Callable[[~.UpdateTableRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_table' not in self._stubs: - self._stubs['update_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable', - request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_table'] - - @property - def delete_table(self) -> Callable[ - [bigtable_table_admin.DeleteTableRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete table method over gRPC. - - Permanently deletes a specified table and all of its - data. - - Returns: - Callable[[~.DeleteTableRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_table' not in self._stubs: - self._stubs['delete_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', - request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_table'] - - @property - def undelete_table(self) -> Callable[ - [bigtable_table_admin.UndeleteTableRequest], - operations_pb2.Operation]: - r"""Return a callable for the undelete table method over gRPC. - - Restores a specified table which was accidentally - deleted. - - Returns: - Callable[[~.UndeleteTableRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undelete_table' not in self._stubs: - self._stubs['undelete_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable', - request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undelete_table'] - - @property - def modify_column_families(self) -> Callable[ - [bigtable_table_admin.ModifyColumnFamiliesRequest], - table.Table]: - r"""Return a callable for the modify column families method over gRPC. - - Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. - - Returns: - Callable[[~.ModifyColumnFamiliesRequest], - ~.Table]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'modify_column_families' not in self._stubs: - self._stubs['modify_column_families'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies', - request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, - response_deserializer=table.Table.deserialize, - ) - return self._stubs['modify_column_families'] - - @property - def drop_row_range(self) -> Callable[ - [bigtable_table_admin.DropRowRangeRequest], - empty_pb2.Empty]: - r"""Return a callable for the drop row range method over gRPC. - - Permanently drop/delete a row range from a specified - table. 
The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. - - Returns: - Callable[[~.DropRowRangeRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'drop_row_range' not in self._stubs: - self._stubs['drop_row_range'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange', - request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['drop_row_range'] - - @property - def generate_consistency_token(self) -> Callable[ - [bigtable_table_admin.GenerateConsistencyTokenRequest], - bigtable_table_admin.GenerateConsistencyTokenResponse]: - r"""Return a callable for the generate consistency token method over gRPC. - - Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. - - Returns: - Callable[[~.GenerateConsistencyTokenRequest], - ~.GenerateConsistencyTokenResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'generate_consistency_token' not in self._stubs: - self._stubs['generate_consistency_token'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken', - request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, - response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, - ) - return self._stubs['generate_consistency_token'] - - @property - def check_consistency(self) -> Callable[ - [bigtable_table_admin.CheckConsistencyRequest], - bigtable_table_admin.CheckConsistencyResponse]: - r"""Return a callable for the check consistency method over gRPC. - - Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. - - Returns: - Callable[[~.CheckConsistencyRequest], - ~.CheckConsistencyResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'check_consistency' not in self._stubs: - self._stubs['check_consistency'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency', - request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, - response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, - ) - return self._stubs['check_consistency'] - - @property - def snapshot_table(self) -> Callable[ - [bigtable_table_admin.SnapshotTableRequest], - operations_pb2.Operation]: - r"""Return a callable for the snapshot table method over gRPC. - - Creates a new snapshot in the specified cluster from - the specified source table. The cluster and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. 
This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.SnapshotTableRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'snapshot_table' not in self._stubs: - self._stubs['snapshot_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable', - request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['snapshot_table'] - - @property - def get_snapshot(self) -> Callable[ - [bigtable_table_admin.GetSnapshotRequest], - table.Snapshot]: - r"""Return a callable for the get snapshot method over gRPC. - - Gets metadata information about the specified - snapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.GetSnapshotRequest], - ~.Snapshot]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_snapshot' not in self._stubs: - self._stubs['get_snapshot'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot', - request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, - response_deserializer=table.Snapshot.deserialize, - ) - return self._stubs['get_snapshot'] - - @property - def list_snapshots(self) -> Callable[ - [bigtable_table_admin.ListSnapshotsRequest], - bigtable_table_admin.ListSnapshotsResponse]: - r"""Return a callable for the list snapshots method over gRPC. - - Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.ListSnapshotsRequest], - ~.ListSnapshotsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_snapshots' not in self._stubs: - self._stubs['list_snapshots'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots', - request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, - response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, - ) - return self._stubs['list_snapshots'] - - @property - def delete_snapshot(self) -> Callable[ - [bigtable_table_admin.DeleteSnapshotRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete snapshot method over gRPC. - - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. 
This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.DeleteSnapshotRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_snapshot' not in self._stubs: - self._stubs['delete_snapshot'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot', - request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_snapshot'] - - @property - def create_backup(self) -> Callable[ - [bigtable_table_admin.CreateBackupRequest], - operations_pb2.Operation]: - r"""Return a callable for the create backup method over gRPC. - - Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. - - Returns: - Callable[[~.CreateBackupRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_backup' not in self._stubs: - self._stubs['create_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup', - request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_backup'] - - @property - def get_backup(self) -> Callable[ - [bigtable_table_admin.GetBackupRequest], - table.Backup]: - r"""Return a callable for the get backup method over gRPC. - - Gets metadata on a pending or completed Cloud - Bigtable Backup. - - Returns: - Callable[[~.GetBackupRequest], - ~.Backup]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_backup' not in self._stubs: - self._stubs['get_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup', - request_serializer=bigtable_table_admin.GetBackupRequest.serialize, - response_deserializer=table.Backup.deserialize, - ) - return self._stubs['get_backup'] - - @property - def update_backup(self) -> Callable[ - [bigtable_table_admin.UpdateBackupRequest], - table.Backup]: - r"""Return a callable for the update backup method over gRPC. - - Updates a pending or completed Cloud Bigtable Backup. - - Returns: - Callable[[~.UpdateBackupRequest], - ~.Backup]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'update_backup' not in self._stubs: - self._stubs['update_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup', - request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, - response_deserializer=table.Backup.deserialize, - ) - return self._stubs['update_backup'] - - @property - def delete_backup(self) -> Callable[ - [bigtable_table_admin.DeleteBackupRequest], - empty_pb2.Empty]: - r"""Return a callable for the delete backup method over gRPC. - - Deletes a pending or completed Cloud Bigtable backup. - - Returns: - Callable[[~.DeleteBackupRequest], - ~.Empty]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_backup' not in self._stubs: - self._stubs['delete_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup', - request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_backup'] - - @property - def list_backups(self) -> Callable[ - [bigtable_table_admin.ListBackupsRequest], - bigtable_table_admin.ListBackupsResponse]: - r"""Return a callable for the list backups method over gRPC. - - Lists Cloud Bigtable backups. Returns both completed - and pending backups. - - Returns: - Callable[[~.ListBackupsRequest], - ~.ListBackupsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_backups' not in self._stubs: - self._stubs['list_backups'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups', - request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, - response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, - ) - return self._stubs['list_backups'] - - @property - def restore_table(self) -> Callable[ - [bigtable_table_admin.RestoreTableRequest], - operations_pb2.Operation]: - r"""Return a callable for the restore table method over gRPC. - - Create a new table by restoring from a completed backup. The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. - The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - - Returns: - Callable[[~.RestoreTableRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'restore_table' not in self._stubs: - self._stubs['restore_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable', - request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['restore_table'] - - @property - def copy_backup(self) -> Callable[ - [bigtable_table_admin.CopyBackupRequest], - operations_pb2.Operation]: - r"""Return a callable for the copy backup method over gRPC. 
- - Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. - - Returns: - Callable[[~.CopyBackupRequest], - ~.Operation]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'copy_backup' not in self._stubs: - self._stubs['copy_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup', - request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['copy_backup'] - - @property - def get_iam_policy(self) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - policy_pb2.Policy]: - r"""Return a callable for the get iam policy method over gRPC. - - Gets the access control policy for a Table or Backup - resource. Returns an empty policy if the resource exists - but does not have a policy set. - - Returns: - Callable[[~.GetIamPolicyRequest], - ~.Policy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_iam_policy' not in self._stubs: - self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy', - request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs['get_iam_policy'] - - @property - def set_iam_policy(self) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - policy_pb2.Policy]: - r"""Return a callable for the set iam policy method over gRPC. 
- - Sets the access control policy on a Table or Backup - resource. Replaces any existing policy. - - Returns: - Callable[[~.SetIamPolicyRequest], - ~.Policy]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_iam_policy' not in self._stubs: - self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy', - request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs['set_iam_policy'] - - @property - def test_iam_permissions(self) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - iam_policy_pb2.TestIamPermissionsResponse]: - r"""Return a callable for the test iam permissions method over gRPC. - - Returns permissions that the caller has on the - specified Table or Backup resource. - - Returns: - Callable[[~.TestIamPermissionsRequest], - ~.TestIamPermissionsResponse]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'test_iam_permissions' not in self._stubs: - self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions', - request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, - ) - return self._stubs['test_iam_permissions'] - - def close(self): - self.grpc_channel.close() - - @property - def kind(self) -> str: - return "grpc" - - -__all__ = ( - 'BigtableTableAdminGrpcTransport', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py deleted file mode 100644 index bebae212c..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/grpc_asyncio.py +++ /dev/null @@ -1,995 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import warnings -from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union - -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers_async -from google.api_core import operations_v1 -from google.auth import credentials as ga_credentials # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore - -import grpc # type: ignore -from grpc.experimental import aio # type: ignore - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO -from .grpc import BigtableTableAdminGrpcTransport - - -class BigtableTableAdminGrpcAsyncIOTransport(BigtableTableAdminTransport): - """gRPC AsyncIO backend transport for BigtableTableAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. - - It sends protocol buffers over the wire using gRPC (which is built on - top of HTTP/2); the ``grpcio`` package must be installed. 
- """ - - _grpc_channel: aio.Channel - _stubs: Dict[str, Callable] = {} - - @classmethod - def create_channel(cls, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - quota_project_id: Optional[str] = None, - **kwargs) -> aio.Channel: - """Create and return a gRPC AsyncIO channel object. - Args: - host (Optional[str]): The host for the channel to use. - credentials (Optional[~.Credentials]): The - authorization credentials to attach to requests. These - credentials identify this application to the service. If - none are specified, the client will attempt to ascertain - the credentials from the environment. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - kwargs (Optional[dict]): Keyword arguments, which are passed to the - channel creation. - Returns: - aio.Channel: A gRPC AsyncIO channel object. 
- """ - - return grpc_helpers_async.create_channel( - host, - credentials=credentials, - credentials_file=credentials_file, - quota_project_id=quota_project_id, - default_scopes=cls.AUTH_SCOPES, - scopes=scopes, - default_host=cls.DEFAULT_HOST, - **kwargs - ) - - def __init__(self, *, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - channel: Optional[aio.Channel] = None, - api_mtls_endpoint: Optional[str] = None, - client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None, - client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - This argument is ignored if ``channel`` is provided. - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional[Sequence[str]]): A optional list of scopes needed for this - service. These are only used when credentials are not specified and - are passed to :func:`google.auth.default`. - channel (Optional[aio.Channel]): A ``Channel`` instance through - which to make calls. - api_mtls_endpoint (Optional[str]): Deprecated. 
The mutual TLS endpoint. - If provided, it overrides the ``host`` argument and tries to create - a mutual TLS channel with client SSL credentials from - ``client_cert_source`` or application default SSL credentials. - client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]): - Deprecated. A callback to provide client SSL certificate bytes and - private key bytes, both in PEM format. It is ignored if - ``api_mtls_endpoint`` is None. - ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials - for the grpc channel. It is ignored if ``channel`` is provided. - client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]): - A callback to provide client certificate bytes and private key bytes, - both in PEM format. It is used to configure a mutual TLS channel. It is - ignored if ``channel`` or ``ssl_channel_credentials`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you're developing - your own client library. - always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - - Raises: - google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport - creation failed for any reason. - google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials`` - and ``credentials_file`` are passed. 
- """ - self._grpc_channel = None - self._ssl_channel_credentials = ssl_channel_credentials - self._stubs: Dict[str, Callable] = {} - self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None - - if api_mtls_endpoint: - warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning) - if client_cert_source: - warnings.warn("client_cert_source is deprecated", DeprecationWarning) - - if channel: - # Ignore credentials if a channel was passed. - credentials = False - # If a channel was explicitly provided, set it. - self._grpc_channel = channel - self._ssl_channel_credentials = None - else: - if api_mtls_endpoint: - host = api_mtls_endpoint - - # Create SSL credentials with client_cert_source or application - # default SSL credentials. - if client_cert_source: - cert, key = client_cert_source() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - else: - self._ssl_channel_credentials = SslCredentials().ssl_credentials - - else: - if client_cert_source_for_mtls and not ssl_channel_credentials: - cert, key = client_cert_source_for_mtls() - self._ssl_channel_credentials = grpc.ssl_channel_credentials( - certificate_chain=cert, private_key=key - ) - - # The base transport sets the host, credentials and scopes - super().__init__( - host=host, - credentials=credentials, - credentials_file=credentials_file, - scopes=scopes, - quota_project_id=quota_project_id, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience, - ) - - if not self._grpc_channel: - self._grpc_channel = type(self).create_channel( - self._host, - # use the credentials which are saved - credentials=self._credentials, - # Set ``credentials_file`` to ``None`` here as - # the credentials that we saved earlier should be used. 
- credentials_file=None, - scopes=self._scopes, - ssl_credentials=self._ssl_channel_credentials, - quota_project_id=quota_project_id, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Wrap messages. This must be done after self._grpc_channel exists - self._prep_wrapped_messages(client_info) - - @property - def grpc_channel(self) -> aio.Channel: - """Create the channel designed to connect to this service. - - This property caches on the instance; repeated calls return - the same channel. - """ - # Return the channel from cache. - return self._grpc_channel - - @property - def operations_client(self) -> operations_v1.OperationsAsyncClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Quick check: Only create a new client if we do not already have one. - if self._operations_client is None: - self._operations_client = operations_v1.OperationsAsyncClient( - self.grpc_channel - ) - - # Return the client from cache. - return self._operations_client - - @property - def create_table(self) -> Callable[ - [bigtable_table_admin.CreateTableRequest], - Awaitable[gba_table.Table]]: - r"""Return a callable for the create table method over gRPC. - - Creates a new table in the specified instance. - The table can be created with a full set of initial - column families, specified in the request. - - Returns: - Callable[[~.CreateTableRequest], - Awaitable[~.Table]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_table' not in self._stubs: - self._stubs['create_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTable', - request_serializer=bigtable_table_admin.CreateTableRequest.serialize, - response_deserializer=gba_table.Table.deserialize, - ) - return self._stubs['create_table'] - - @property - def create_table_from_snapshot(self) -> Callable[ - [bigtable_table_admin.CreateTableFromSnapshotRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create table from snapshot method over gRPC. - - Creates a new table from the specified snapshot. The - target table must not exist. The snapshot and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.CreateTableFromSnapshotRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'create_table_from_snapshot' not in self._stubs: - self._stubs['create_table_from_snapshot'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CreateTableFromSnapshot', - request_serializer=bigtable_table_admin.CreateTableFromSnapshotRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_table_from_snapshot'] - - @property - def list_tables(self) -> Callable[ - [bigtable_table_admin.ListTablesRequest], - Awaitable[bigtable_table_admin.ListTablesResponse]]: - r"""Return a callable for the list tables method over gRPC. - - Lists all tables served from a specified instance. - - Returns: - Callable[[~.ListTablesRequest], - Awaitable[~.ListTablesResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_tables' not in self._stubs: - self._stubs['list_tables'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ListTables', - request_serializer=bigtable_table_admin.ListTablesRequest.serialize, - response_deserializer=bigtable_table_admin.ListTablesResponse.deserialize, - ) - return self._stubs['list_tables'] - - @property - def get_table(self) -> Callable[ - [bigtable_table_admin.GetTableRequest], - Awaitable[table.Table]]: - r"""Return a callable for the get table method over gRPC. - - Gets metadata information about the specified table. - - Returns: - Callable[[~.GetTableRequest], - Awaitable[~.Table]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_table' not in self._stubs: - self._stubs['get_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetTable', - request_serializer=bigtable_table_admin.GetTableRequest.serialize, - response_deserializer=table.Table.deserialize, - ) - return self._stubs['get_table'] - - @property - def update_table(self) -> Callable[ - [bigtable_table_admin.UpdateTableRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the update table method over gRPC. - - Updates a specified table. - - Returns: - Callable[[~.UpdateTableRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_table' not in self._stubs: - self._stubs['update_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/UpdateTable', - request_serializer=bigtable_table_admin.UpdateTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['update_table'] - - @property - def delete_table(self) -> Callable[ - [bigtable_table_admin.DeleteTableRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete table method over gRPC. - - Permanently deletes a specified table and all of its - data. - - Returns: - Callable[[~.DeleteTableRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_table' not in self._stubs: - self._stubs['delete_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteTable', - request_serializer=bigtable_table_admin.DeleteTableRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_table'] - - @property - def undelete_table(self) -> Callable[ - [bigtable_table_admin.UndeleteTableRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the undelete table method over gRPC. - - Restores a specified table which was accidentally - deleted. - - Returns: - Callable[[~.UndeleteTableRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'undelete_table' not in self._stubs: - self._stubs['undelete_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/UndeleteTable', - request_serializer=bigtable_table_admin.UndeleteTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['undelete_table'] - - @property - def modify_column_families(self) -> Callable[ - [bigtable_table_admin.ModifyColumnFamiliesRequest], - Awaitable[table.Table]]: - r"""Return a callable for the modify column families method over gRPC. - - Performs a series of column family modifications on - the specified table. Either all or none of the - modifications will occur before this method returns, but - data requests received prior to that point may see a - table where only some modifications have taken effect. - - Returns: - Callable[[~.ModifyColumnFamiliesRequest], - Awaitable[~.Table]]: - A function that, when called, will call the underlying RPC - on the server. 
- """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'modify_column_families' not in self._stubs: - self._stubs['modify_column_families'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ModifyColumnFamilies', - request_serializer=bigtable_table_admin.ModifyColumnFamiliesRequest.serialize, - response_deserializer=table.Table.deserialize, - ) - return self._stubs['modify_column_families'] - - @property - def drop_row_range(self) -> Callable[ - [bigtable_table_admin.DropRowRangeRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the drop row range method over gRPC. - - Permanently drop/delete a row range from a specified - table. The request can specify whether to delete all - rows in a table, or only those that match a particular - prefix. - - Returns: - Callable[[~.DropRowRangeRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'drop_row_range' not in self._stubs: - self._stubs['drop_row_range'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DropRowRange', - request_serializer=bigtable_table_admin.DropRowRangeRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['drop_row_range'] - - @property - def generate_consistency_token(self) -> Callable[ - [bigtable_table_admin.GenerateConsistencyTokenRequest], - Awaitable[bigtable_table_admin.GenerateConsistencyTokenResponse]]: - r"""Return a callable for the generate consistency token method over gRPC. 
- - Generates a consistency token for a Table, which can - be used in CheckConsistency to check whether mutations - to the table that finished before this call started have - been replicated. The tokens will be available for 90 - days. - - Returns: - Callable[[~.GenerateConsistencyTokenRequest], - Awaitable[~.GenerateConsistencyTokenResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'generate_consistency_token' not in self._stubs: - self._stubs['generate_consistency_token'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GenerateConsistencyToken', - request_serializer=bigtable_table_admin.GenerateConsistencyTokenRequest.serialize, - response_deserializer=bigtable_table_admin.GenerateConsistencyTokenResponse.deserialize, - ) - return self._stubs['generate_consistency_token'] - - @property - def check_consistency(self) -> Callable[ - [bigtable_table_admin.CheckConsistencyRequest], - Awaitable[bigtable_table_admin.CheckConsistencyResponse]]: - r"""Return a callable for the check consistency method over gRPC. - - Checks replication consistency based on a consistency - token, that is, if replication has caught up based on - the conditions specified in the token and the check - request. - - Returns: - Callable[[~.CheckConsistencyRequest], - Awaitable[~.CheckConsistencyResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'check_consistency' not in self._stubs: - self._stubs['check_consistency'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CheckConsistency', - request_serializer=bigtable_table_admin.CheckConsistencyRequest.serialize, - response_deserializer=bigtable_table_admin.CheckConsistencyResponse.deserialize, - ) - return self._stubs['check_consistency'] - - @property - def snapshot_table(self) -> Callable[ - [bigtable_table_admin.SnapshotTableRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the snapshot table method over gRPC. - - Creates a new snapshot in the specified cluster from - the specified source table. The cluster and the table - must be in the same instance. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.SnapshotTableRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'snapshot_table' not in self._stubs: - self._stubs['snapshot_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/SnapshotTable', - request_serializer=bigtable_table_admin.SnapshotTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['snapshot_table'] - - @property - def get_snapshot(self) -> Callable[ - [bigtable_table_admin.GetSnapshotRequest], - Awaitable[table.Snapshot]]: - r"""Return a callable for the get snapshot method over gRPC. - - Gets metadata information about the specified - snapshot. 
- Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.GetSnapshotRequest], - Awaitable[~.Snapshot]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_snapshot' not in self._stubs: - self._stubs['get_snapshot'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetSnapshot', - request_serializer=bigtable_table_admin.GetSnapshotRequest.serialize, - response_deserializer=table.Snapshot.deserialize, - ) - return self._stubs['get_snapshot'] - - @property - def list_snapshots(self) -> Callable[ - [bigtable_table_admin.ListSnapshotsRequest], - Awaitable[bigtable_table_admin.ListSnapshotsResponse]]: - r"""Return a callable for the list snapshots method over gRPC. - - Lists all snapshots associated with the specified - cluster. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.ListSnapshotsRequest], - Awaitable[~.ListSnapshotsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'list_snapshots' not in self._stubs: - self._stubs['list_snapshots'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ListSnapshots', - request_serializer=bigtable_table_admin.ListSnapshotsRequest.serialize, - response_deserializer=bigtable_table_admin.ListSnapshotsResponse.deserialize, - ) - return self._stubs['list_snapshots'] - - @property - def delete_snapshot(self) -> Callable[ - [bigtable_table_admin.DeleteSnapshotRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete snapshot method over gRPC. - - Permanently deletes the specified snapshot. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - Returns: - Callable[[~.DeleteSnapshotRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'delete_snapshot' not in self._stubs: - self._stubs['delete_snapshot'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteSnapshot', - request_serializer=bigtable_table_admin.DeleteSnapshotRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_snapshot'] - - @property - def create_backup(self) -> Callable[ - [bigtable_table_admin.CreateBackupRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the create backup method over gRPC. - - Starts creating a new Cloud Bigtable Backup. The returned backup - [long-running operation][google.longrunning.Operation] can be - used to track creation of the backup. 
The - [metadata][google.longrunning.Operation.metadata] field type is - [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. - The [response][google.longrunning.Operation.response] field type - is [Backup][google.bigtable.admin.v2.Backup], if successful. - Cancelling the returned operation will stop the creation and - delete the backup. - - Returns: - Callable[[~.CreateBackupRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'create_backup' not in self._stubs: - self._stubs['create_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CreateBackup', - request_serializer=bigtable_table_admin.CreateBackupRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['create_backup'] - - @property - def get_backup(self) -> Callable[ - [bigtable_table_admin.GetBackupRequest], - Awaitable[table.Backup]]: - r"""Return a callable for the get backup method over gRPC. - - Gets metadata on a pending or completed Cloud - Bigtable Backup. - - Returns: - Callable[[~.GetBackupRequest], - Awaitable[~.Backup]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'get_backup' not in self._stubs: - self._stubs['get_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetBackup', - request_serializer=bigtable_table_admin.GetBackupRequest.serialize, - response_deserializer=table.Backup.deserialize, - ) - return self._stubs['get_backup'] - - @property - def update_backup(self) -> Callable[ - [bigtable_table_admin.UpdateBackupRequest], - Awaitable[table.Backup]]: - r"""Return a callable for the update backup method over gRPC. - - Updates a pending or completed Cloud Bigtable Backup. - - Returns: - Callable[[~.UpdateBackupRequest], - Awaitable[~.Backup]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'update_backup' not in self._stubs: - self._stubs['update_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/UpdateBackup', - request_serializer=bigtable_table_admin.UpdateBackupRequest.serialize, - response_deserializer=table.Backup.deserialize, - ) - return self._stubs['update_backup'] - - @property - def delete_backup(self) -> Callable[ - [bigtable_table_admin.DeleteBackupRequest], - Awaitable[empty_pb2.Empty]]: - r"""Return a callable for the delete backup method over gRPC. - - Deletes a pending or completed Cloud Bigtable backup. - - Returns: - Callable[[~.DeleteBackupRequest], - Awaitable[~.Empty]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'delete_backup' not in self._stubs: - self._stubs['delete_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/DeleteBackup', - request_serializer=bigtable_table_admin.DeleteBackupRequest.serialize, - response_deserializer=empty_pb2.Empty.FromString, - ) - return self._stubs['delete_backup'] - - @property - def list_backups(self) -> Callable[ - [bigtable_table_admin.ListBackupsRequest], - Awaitable[bigtable_table_admin.ListBackupsResponse]]: - r"""Return a callable for the list backups method over gRPC. - - Lists Cloud Bigtable backups. Returns both completed - and pending backups. - - Returns: - Callable[[~.ListBackupsRequest], - Awaitable[~.ListBackupsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'list_backups' not in self._stubs: - self._stubs['list_backups'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/ListBackups', - request_serializer=bigtable_table_admin.ListBackupsRequest.serialize, - response_deserializer=bigtable_table_admin.ListBackupsResponse.deserialize, - ) - return self._stubs['list_backups'] - - @property - def restore_table(self) -> Callable[ - [bigtable_table_admin.RestoreTableRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the restore table method over gRPC. - - Create a new table by restoring from a completed backup. The - returned table [long-running - operation][google.longrunning.Operation] can be used to track - the progress of the operation, and to cancel it. The - [metadata][google.longrunning.Operation.metadata] field type is - [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. 
- The [response][google.longrunning.Operation.response] type is - [Table][google.bigtable.admin.v2.Table], if successful. - - Returns: - Callable[[~.RestoreTableRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'restore_table' not in self._stubs: - self._stubs['restore_table'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/RestoreTable', - request_serializer=bigtable_table_admin.RestoreTableRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['restore_table'] - - @property - def copy_backup(self) -> Callable[ - [bigtable_table_admin.CopyBackupRequest], - Awaitable[operations_pb2.Operation]]: - r"""Return a callable for the copy backup method over gRPC. - - Copy a Cloud Bigtable backup to a new backup in the - destination cluster located in the destination instance - and project. - - Returns: - Callable[[~.CopyBackupRequest], - Awaitable[~.Operation]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'copy_backup' not in self._stubs: - self._stubs['copy_backup'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/CopyBackup', - request_serializer=bigtable_table_admin.CopyBackupRequest.serialize, - response_deserializer=operations_pb2.Operation.FromString, - ) - return self._stubs['copy_backup'] - - @property - def get_iam_policy(self) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - Awaitable[policy_pb2.Policy]]: - r"""Return a callable for the get iam policy method over gRPC. - - Gets the access control policy for a Table or Backup - resource. Returns an empty policy if the resource exists - but does not have a policy set. - - Returns: - Callable[[~.GetIamPolicyRequest], - Awaitable[~.Policy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'get_iam_policy' not in self._stubs: - self._stubs['get_iam_policy'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/GetIamPolicy', - request_serializer=iam_policy_pb2.GetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs['get_iam_policy'] - - @property - def set_iam_policy(self) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - Awaitable[policy_pb2.Policy]]: - r"""Return a callable for the set iam policy method over gRPC. - - Sets the access control policy on a Table or Backup - resource. Replaces any existing policy. - - Returns: - Callable[[~.SetIamPolicyRequest], - Awaitable[~.Policy]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. 
- # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. - if 'set_iam_policy' not in self._stubs: - self._stubs['set_iam_policy'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/SetIamPolicy', - request_serializer=iam_policy_pb2.SetIamPolicyRequest.SerializeToString, - response_deserializer=policy_pb2.Policy.FromString, - ) - return self._stubs['set_iam_policy'] - - @property - def test_iam_permissions(self) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - Awaitable[iam_policy_pb2.TestIamPermissionsResponse]]: - r"""Return a callable for the test iam permissions method over gRPC. - - Returns permissions that the caller has on the - specified Table or Backup resource. - - Returns: - Callable[[~.TestIamPermissionsRequest], - Awaitable[~.TestIamPermissionsResponse]]: - A function that, when called, will call the underlying RPC - on the server. - """ - # Generate a "stub function" on-the-fly which will actually make - # the request. - # gRPC handles serialization and deserialization, so we just need - # to pass in the functions for each. 
- if 'test_iam_permissions' not in self._stubs: - self._stubs['test_iam_permissions'] = self.grpc_channel.unary_unary( - '/google.bigtable.admin.v2.BigtableTableAdmin/TestIamPermissions', - request_serializer=iam_policy_pb2.TestIamPermissionsRequest.SerializeToString, - response_deserializer=iam_policy_pb2.TestIamPermissionsResponse.FromString, - ) - return self._stubs['test_iam_permissions'] - - def close(self): - return self.grpc_channel.close() - - -__all__ = ( - 'BigtableTableAdminGrpcAsyncIOTransport', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py deleted file mode 100644 index c286fbb46..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/services/bigtable_table_admin/transports/rest.py +++ /dev/null @@ -1,3310 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -from google.auth.transport.requests import AuthorizedSession # type: ignore -import json # type: ignore -import grpc # type: ignore -from google.auth.transport.grpc import SslCredentials # type: ignore -from google.auth import credentials as ga_credentials # type: ignore -from google.api_core import exceptions as core_exceptions -from google.api_core import retry as retries -from google.api_core import rest_helpers -from google.api_core import rest_streaming -from google.api_core import path_template -from google.api_core import gapic_v1 - -from google.protobuf import json_format -from google.api_core import operations_v1 -from requests import __version__ as requests_version -import dataclasses -import re -from typing import Any, Callable, Dict, List, Optional, Sequence, Tuple, Union -import warnings - -try: - OptionalRetry = Union[retries.Retry, gapic_v1.method._MethodDefault, None] -except AttributeError: # pragma: NO COVER - OptionalRetry = Union[retries.Retry, object, None] # type: ignore - - -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.protobuf import empty_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore - -from .base import BigtableTableAdminTransport, DEFAULT_CLIENT_INFO as BASE_DEFAULT_CLIENT_INFO - - -DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo( - gapic_version=BASE_DEFAULT_CLIENT_INFO.gapic_version, - grpc_version=None, - rest_version=requests_version, -) - - -class BigtableTableAdminRestInterceptor: - """Interceptor for BigtableTableAdmin. - - Interceptors are used to manipulate requests, request metadata, and responses - in arbitrary ways. 
- Example use cases include: - * Logging - * Verifying requests according to service or custom semantics - * Stripping extraneous information from responses - - These use cases and more can be enabled by injecting an - instance of a custom subclass when constructing the BigtableTableAdminRestTransport. - - .. code-block:: python - class MyCustomBigtableTableAdminInterceptor(BigtableTableAdminRestInterceptor): - def pre_check_consistency(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_check_consistency(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_copy_backup(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_copy_backup(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_backup(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_backup(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_create_table_from_snapshot(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_create_table_from_snapshot(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_delete_backup(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_snapshot(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_delete_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def 
pre_drop_row_range(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def pre_generate_consistency_token(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_generate_consistency_token(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_backup(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_backup(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_iam_policy(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_iam_policy(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_snapshot(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_snapshot(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_get_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_get_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_backups(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_backups(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_snapshots(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_snapshots(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_list_tables(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_list_tables(self, response): - logging.log(f"Received response: {response}") - return 
response - - def pre_modify_column_families(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_modify_column_families(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_restore_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_restore_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_set_iam_policy(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_set_iam_policy(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_snapshot_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_snapshot_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_test_iam_permissions(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_test_iam_permissions(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_undelete_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_undelete_table(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_backup(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_backup(self, response): - logging.log(f"Received response: {response}") - return response - - def pre_update_table(self, request, metadata): - logging.log(f"Received request: {request}") - return request, metadata - - def post_update_table(self, response): - logging.log(f"Received response: {response}") - return response - - transport = 
BigtableTableAdminRestTransport(interceptor=MyCustomBigtableTableAdminInterceptor()) - client = BigtableTableAdminClient(transport=transport) - - - """ - def pre_check_consistency(self, request: bigtable_table_admin.CheckConsistencyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CheckConsistencyRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for check_consistency - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_check_consistency(self, response: bigtable_table_admin.CheckConsistencyResponse) -> bigtable_table_admin.CheckConsistencyResponse: - """Post-rpc interceptor for check_consistency - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_copy_backup(self, request: bigtable_table_admin.CopyBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CopyBackupRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for copy_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_copy_backup(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for copy_backup - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_create_backup(self, request: bigtable_table_admin.CreateBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CreateBackupRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. 
- """ - return request, metadata - - def post_create_backup(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for create_backup - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_create_table(self, request: bigtable_table_admin.CreateTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CreateTableRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_create_table(self, response: gba_table.Table) -> gba_table.Table: - """Post-rpc interceptor for create_table - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_create_table_from_snapshot(self, request: bigtable_table_admin.CreateTableFromSnapshotRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.CreateTableFromSnapshotRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for create_table_from_snapshot - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_create_table_from_snapshot(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for create_table_from_snapshot - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. 
- """ - return response - def pre_delete_backup(self, request: bigtable_table_admin.DeleteBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.DeleteBackupRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def pre_delete_snapshot(self, request: bigtable_table_admin.DeleteSnapshotRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.DeleteSnapshotRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_snapshot - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def pre_delete_table(self, request: bigtable_table_admin.DeleteTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.DeleteTableRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for delete_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def pre_drop_row_range(self, request: bigtable_table_admin.DropRowRangeRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.DropRowRangeRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for drop_row_range - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. 
- """ - return request, metadata - - def pre_generate_consistency_token(self, request: bigtable_table_admin.GenerateConsistencyTokenRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.GenerateConsistencyTokenRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for generate_consistency_token - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_generate_consistency_token(self, response: bigtable_table_admin.GenerateConsistencyTokenResponse) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - """Post-rpc interceptor for generate_consistency_token - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_get_backup(self, request: bigtable_table_admin.GetBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.GetBackupRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_get_backup(self, response: table.Backup) -> table.Backup: - """Post-rpc interceptor for get_backup - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_get_iam_policy(self, request: iam_policy_pb2.GetIamPolicyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.GetIamPolicyRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_iam_policy - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. 
- """ - return request, metadata - - def post_get_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: - """Post-rpc interceptor for get_iam_policy - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_get_snapshot(self, request: bigtable_table_admin.GetSnapshotRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.GetSnapshotRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_snapshot - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_get_snapshot(self, response: table.Snapshot) -> table.Snapshot: - """Post-rpc interceptor for get_snapshot - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_get_table(self, request: bigtable_table_admin.GetTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.GetTableRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for get_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_get_table(self, response: table.Table) -> table.Table: - """Post-rpc interceptor for get_table - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. 
- """ - return response - def pre_list_backups(self, request: bigtable_table_admin.ListBackupsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.ListBackupsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_backups - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_list_backups(self, response: bigtable_table_admin.ListBackupsResponse) -> bigtable_table_admin.ListBackupsResponse: - """Post-rpc interceptor for list_backups - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_list_snapshots(self, request: bigtable_table_admin.ListSnapshotsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.ListSnapshotsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_snapshots - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_list_snapshots(self, response: bigtable_table_admin.ListSnapshotsResponse) -> bigtable_table_admin.ListSnapshotsResponse: - """Post-rpc interceptor for list_snapshots - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_list_tables(self, request: bigtable_table_admin.ListTablesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.ListTablesRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for list_tables - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. 
- """ - return request, metadata - - def post_list_tables(self, response: bigtable_table_admin.ListTablesResponse) -> bigtable_table_admin.ListTablesResponse: - """Post-rpc interceptor for list_tables - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_modify_column_families(self, request: bigtable_table_admin.ModifyColumnFamiliesRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.ModifyColumnFamiliesRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for modify_column_families - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_modify_column_families(self, response: table.Table) -> table.Table: - """Post-rpc interceptor for modify_column_families - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_restore_table(self, request: bigtable_table_admin.RestoreTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.RestoreTableRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for restore_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_restore_table(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for restore_table - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. 
- """ - return response - def pre_set_iam_policy(self, request: iam_policy_pb2.SetIamPolicyRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.SetIamPolicyRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for set_iam_policy - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_set_iam_policy(self, response: policy_pb2.Policy) -> policy_pb2.Policy: - """Post-rpc interceptor for set_iam_policy - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_snapshot_table(self, request: bigtable_table_admin.SnapshotTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.SnapshotTableRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for snapshot_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_snapshot_table(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for snapshot_table - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_test_iam_permissions(self, request: iam_policy_pb2.TestIamPermissionsRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[iam_policy_pb2.TestIamPermissionsRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for test_iam_permissions - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. 
- """ - return request, metadata - - def post_test_iam_permissions(self, response: iam_policy_pb2.TestIamPermissionsResponse) -> iam_policy_pb2.TestIamPermissionsResponse: - """Post-rpc interceptor for test_iam_permissions - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_undelete_table(self, request: bigtable_table_admin.UndeleteTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.UndeleteTableRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for undelete_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_undelete_table(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for undelete_table - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - def pre_update_backup(self, request: bigtable_table_admin.UpdateBackupRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.UpdateBackupRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_backup - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_update_backup(self, response: table.Backup) -> table.Backup: - """Post-rpc interceptor for update_backup - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. 
- """ - return response - def pre_update_table(self, request: bigtable_table_admin.UpdateTableRequest, metadata: Sequence[Tuple[str, str]]) -> Tuple[bigtable_table_admin.UpdateTableRequest, Sequence[Tuple[str, str]]]: - """Pre-rpc interceptor for update_table - - Override in a subclass to manipulate the request or metadata - before they are sent to the BigtableTableAdmin server. - """ - return request, metadata - - def post_update_table(self, response: operations_pb2.Operation) -> operations_pb2.Operation: - """Post-rpc interceptor for update_table - - Override in a subclass to manipulate the response - after it is returned by the BigtableTableAdmin server but before - it is returned to user code. - """ - return response - - -@dataclasses.dataclass -class BigtableTableAdminRestStub: - _session: AuthorizedSession - _host: str - _interceptor: BigtableTableAdminRestInterceptor - - -class BigtableTableAdminRestTransport(BigtableTableAdminTransport): - """REST backend transport for BigtableTableAdmin. - - Service for creating, configuring, and deleting Cloud - Bigtable tables. - - Provides access to the table schemas only, not the data stored - within the tables. - - This class defines the same methods as the primary client, so the - primary client can load the underlying transport implementation - and call it. 
- - It sends JSON representations of protocol buffers over HTTP/1.1 - - """ - - def __init__(self, *, - host: str = 'bigtableadmin.googleapis.com', - credentials: Optional[ga_credentials.Credentials] = None, - credentials_file: Optional[str] = None, - scopes: Optional[Sequence[str]] = None, - client_cert_source_for_mtls: Optional[Callable[[ - ], Tuple[bytes, bytes]]] = None, - quota_project_id: Optional[str] = None, - client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO, - always_use_jwt_access: Optional[bool] = False, - url_scheme: str = 'https', - interceptor: Optional[BigtableTableAdminRestInterceptor] = None, - api_audience: Optional[str] = None, - ) -> None: - """Instantiate the transport. - - Args: - host (Optional[str]): - The hostname to connect to (default: 'bigtableadmin.googleapis.com'). - credentials (Optional[google.auth.credentials.Credentials]): The - authorization credentials to attach to requests. These - credentials identify the application to the service; if none - are specified, the client will attempt to ascertain the - credentials from the environment. - - credentials_file (Optional[str]): A file with credentials that can - be loaded with :func:`google.auth.load_credentials_from_file`. - This argument is ignored if ``channel`` is provided. - scopes (Optional(Sequence[str])): A list of scopes. This argument is - ignored if ``channel`` is provided. - client_cert_source_for_mtls (Callable[[], Tuple[bytes, bytes]]): Client - certificate to configure mutual TLS HTTP channel. It is ignored - if ``channel`` is provided. - quota_project_id (Optional[str]): An optional project to use for billing - and quota. - client_info (google.api_core.gapic_v1.client_info.ClientInfo): - The client info used to send a user-agent string along with - API requests. If ``None``, then default info will be used. - Generally, you only need to set this if you are developing - your own client library. 
- always_use_jwt_access (Optional[bool]): Whether self signed JWT should - be used for service account credentials. - url_scheme: the protocol scheme for the API endpoint. Normally - "https", but for testing or local servers, - "http" can be specified. - """ - # Run the base constructor - # TODO(yon-mg): resolve other ctor params i.e. scopes, quota, etc. - # TODO: When custom host (api_endpoint) is set, `scopes` must *also* be set on the - # credentials object - maybe_url_match = re.match("^(?Phttp(?:s)?://)?(?P.*)$", host) - if maybe_url_match is None: - raise ValueError(f"Unexpected hostname structure: {host}") # pragma: NO COVER - - url_match_items = maybe_url_match.groupdict() - - host = f"{url_scheme}://{host}" if not url_match_items["scheme"] else host - - super().__init__( - host=host, - credentials=credentials, - client_info=client_info, - always_use_jwt_access=always_use_jwt_access, - api_audience=api_audience - ) - self._session = AuthorizedSession( - self._credentials, default_host=self.DEFAULT_HOST) - self._operations_client: Optional[operations_v1.AbstractOperationsClient] = None - if client_cert_source_for_mtls: - self._session.configure_mtls_channel(client_cert_source_for_mtls) - self._interceptor = interceptor or BigtableTableAdminRestInterceptor() - self._prep_wrapped_messages(client_info) - - @property - def operations_client(self) -> operations_v1.AbstractOperationsClient: - """Create the client designed to process long-running operations. - - This property caches on the instance; repeated calls return the same - client. - """ - # Only create a new client if we do not already have one. 
- if self._operations_client is None: - http_options: Dict[str, List[Dict[str, str]]] = { - 'google.longrunning.Operations.CancelOperation': [ - { - 'method': 'post', - 'uri': '/v2/{name=operations/**}:cancel', - }, - ], - 'google.longrunning.Operations.DeleteOperation': [ - { - 'method': 'delete', - 'uri': '/v2/{name=operations/**}', - }, - ], - 'google.longrunning.Operations.GetOperation': [ - { - 'method': 'get', - 'uri': '/v2/{name=operations/**}', - }, - ], - 'google.longrunning.Operations.ListOperations': [ - { - 'method': 'get', - 'uri': '/v2/{name=operations/projects/**}/operations', - }, - ], - } - - rest_transport = operations_v1.OperationsRestTransport( - host=self._host, - # use the credentials which are saved - credentials=self._credentials, - scopes=self._scopes, - http_options=http_options, - path_prefix="v2") - - self._operations_client = operations_v1.AbstractOperationsClient(transport=rest_transport) - - # Return the client from cache. - return self._operations_client - - class _CheckConsistency(BigtableTableAdminRestStub): - def __hash__(self): - return hash("CheckConsistency") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.CheckConsistencyRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable_table_admin.CheckConsistencyResponse: - r"""Call the check consistency method over HTTP. - - Args: - request (~.bigtable_table_admin.CheckConsistencyRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. 
- timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable_table_admin.CheckConsistencyResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{name=projects/*/instances/*/tables/*}:checkConsistency', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_check_consistency(request, metadata) - pb_request = bigtable_table_admin.CheckConsistencyRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.CheckConsistencyResponse() - pb_resp = bigtable_table_admin.CheckConsistencyResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_check_consistency(resp) - return resp - - class _CopyBackup(BigtableTableAdminRestStub): - def __hash__(self): - return hash("CopyBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.CopyBackupRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the copy backup method over HTTP. - - Args: - request (~.bigtable_table_admin.CopyBackupRequest): - The request object. The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_copy_backup(request, metadata) - pb_request = bigtable_table_admin.CopyBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_copy_backup(resp) - return resp - - class _CreateBackup(BigtableTableAdminRestStub): - def __hash__(self): - return hash("CreateBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "backupId" : "", } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.CreateBackupRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the create backup method over HTTP. - - Args: - request (~.bigtable_table_admin.CreateBackupRequest): - The request object. The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/backups', - 'body': 'backup', - }, - ] - request, metadata = self._interceptor.pre_create_backup(request, metadata) - pb_request = bigtable_table_admin.CreateBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_backup(resp) - return resp - - class _CreateTable(BigtableTableAdminRestStub): - def __hash__(self): - return hash("CreateTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.CreateTableRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> gba_table.Table: - r"""Call the create table method over HTTP. - - Args: - request (~.bigtable_table_admin.CreateTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.gba_table.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{parent=projects/*/instances/*}/tables', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_create_table(request, metadata) - pb_request = bigtable_table_admin.CreateTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = gba_table.Table() - pb_resp = gba_table.Table.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_table(resp) - return resp - - class _CreateTableFromSnapshot(BigtableTableAdminRestStub): - def __hash__(self): - return hash("CreateTableFromSnapshot") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.CreateTableFromSnapshotRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the create table from - snapshot method over HTTP. - - Args: - request (~.bigtable_table_admin.CreateTableFromSnapshotRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_create_table_from_snapshot(request, metadata) - pb_request = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_create_table_from_snapshot(resp) - return resp - - class _DeleteBackup(BigtableTableAdminRestStub): - def __hash__(self): - return hash("DeleteBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.DeleteBackupRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ): - r"""Call the delete backup method over HTTP. - - Args: - request (~.bigtable_table_admin.DeleteBackupRequest): - The request object. The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v2/{name=projects/*/instances/*/clusters/*/backups/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_backup(request, metadata) - pb_request = bigtable_table_admin.DeleteBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteSnapshot(BigtableTableAdminRestStub): - def __hash__(self): - return hash("DeleteSnapshot") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.DeleteSnapshotRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ): - r"""Call the delete snapshot method over HTTP. - - Args: - request (~.bigtable_table_admin.DeleteSnapshotRequest): - The request object. 
Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_snapshot(request, metadata) - pb_request = bigtable_table_admin.DeleteSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DeleteTable(BigtableTableAdminRestStub): - def __hash__(self): - return hash("DeleteTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.DeleteTableRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ): - r"""Call the delete table method over HTTP. - - Args: - request (~.bigtable_table_admin.DeleteTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'delete', - 'uri': '/v2/{name=projects/*/instances/*/tables/*}', - }, - ] - request, metadata = self._interceptor.pre_delete_table(request, metadata) - pb_request = bigtable_table_admin.DeleteTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _DropRowRange(BigtableTableAdminRestStub): - def __hash__(self): - return hash("DropRowRange") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.DropRowRangeRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ): - r"""Call the drop row range method over HTTP. - - Args: - request (~.bigtable_table_admin.DropRowRangeRequest): - The request object. 
Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{name=projects/*/instances/*/tables/*}:dropRowRange', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_drop_row_range(request, metadata) - pb_request = bigtable_table_admin.DropRowRangeRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - class _GenerateConsistencyToken(BigtableTableAdminRestStub): - def __hash__(self): - return hash("GenerateConsistencyToken") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.GenerateConsistencyTokenRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable_table_admin.GenerateConsistencyTokenResponse: - r"""Call the generate consistency - token method over HTTP. - - Args: - request (~.bigtable_table_admin.GenerateConsistencyTokenRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - ~.bigtable_table_admin.GenerateConsistencyTokenResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_generate_consistency_token(request, metadata) - pb_request = bigtable_table_admin.GenerateConsistencyTokenRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.GenerateConsistencyTokenResponse() - pb_resp = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_generate_consistency_token(resp) - return resp - - class _GetBackup(BigtableTableAdminRestStub): - def __hash__(self): - return hash("GetBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.GetBackupRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> table.Backup: - r"""Call the get backup method over HTTP. - - Args: - request (~.bigtable_table_admin.GetBackupRequest): - The request object. The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.table.Backup: - A backup of a Cloud Bigtable table. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{name=projects/*/instances/*/clusters/*/backups/*}', - }, - ] - request, metadata = self._interceptor.pre_get_backup(request, metadata) - pb_request = bigtable_table_admin.GetBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Backup() - pb_resp = table.Backup.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_backup(resp) - return resp - - class _GetIamPolicy(BigtableTableAdminRestStub): - def __hash__(self): - return hash("GetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: iam_policy_pb2.GetIamPolicyRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> policy_pb2.Policy: - r"""Call the get iam policy method over HTTP. - - Args: - request (~.iam_policy_pb2.GetIamPolicyRequest): - The request object. Request message for ``GetIamPolicy`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which - specifies access controls for Google Cloud resources. - - A ``Policy`` is a collection of ``bindings``. A - ``binding`` binds one or more ``members``, or - principals, to a single ``role``. Principals can be user - accounts, service accounts, Google groups, and domains - (such as G Suite). A ``role`` is a named list of - permissions; each ``role`` can be an IAM predefined role - or a user-created custom role. 
- - For some types of Google Cloud resources, a ``binding`` - can also specify a ``condition``, which is a logical - expression that allows access to a resource only if the - expression evaluates to ``true``. A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the `IAM - documentation `__. - - **JSON example:** - - :: - - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } - - **YAML example:** - - :: - - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 - - For a description of IAM and its features, see the `IAM - documentation `__. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy', - 'body': '*', - }, -{ - 'method': 'post', - 'uri': '/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:getIamPolicy', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_get_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = policy_pb2.Policy() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_iam_policy(resp) - return resp - - class _GetSnapshot(BigtableTableAdminRestStub): - def __hash__(self): - return hash("GetSnapshot") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.GetSnapshotRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> table.Snapshot: - r"""Call the get snapshot method over HTTP. - - Args: - request (~.bigtable_table_admin.GetSnapshotRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.table.Snapshot: - A snapshot of a table at a particular - time. A snapshot can be used as a - checkpoint for data restoration or a - data source for a new table. - - Note: This is a private alpha release of - Cloud Bigtable snapshots. 
This feature - is not currently available to most Cloud - Bigtable customers. This feature might - be changed in backward-incompatible ways - and is not recommended for production - use. It is not subject to any SLA or - deprecation policy. - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}', - }, - ] - request, metadata = self._interceptor.pre_get_snapshot(request, metadata) - pb_request = bigtable_table_admin.GetSnapshotRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Snapshot() - pb_resp = table.Snapshot.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_snapshot(resp) - return resp - - class _GetTable(BigtableTableAdminRestStub): - def __hash__(self): - return hash("GetTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.GetTableRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> table.Table: - r"""Call the get table method over HTTP. - - Args: - request (~.bigtable_table_admin.GetTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.table.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{name=projects/*/instances/*/tables/*}', - }, - ] - request, metadata = self._interceptor.pre_get_table(request, metadata) - pb_request = bigtable_table_admin.GetTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Table() - pb_resp = table.Table.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_get_table(resp) - return resp - - class _ListBackups(BigtableTableAdminRestStub): - def __hash__(self): - return hash("ListBackups") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.ListBackupsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable_table_admin.ListBackupsResponse: - r"""Call the list backups method over HTTP. - - Args: - request (~.bigtable_table_admin.ListBackupsRequest): - The request object. The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.bigtable_table_admin.ListBackupsResponse: - The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/backups', - }, - ] - request, metadata = self._interceptor.pre_list_backups(request, metadata) - pb_request = bigtable_table_admin.ListBackupsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.ListBackupsResponse() - pb_resp = bigtable_table_admin.ListBackupsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_backups(resp) - return resp - - class _ListSnapshots(BigtableTableAdminRestStub): - def __hash__(self): - return hash("ListSnapshots") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.ListSnapshotsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable_table_admin.ListSnapshotsResponse: - r"""Call the list snapshots method over HTTP. - - Args: - request (~.bigtable_table_admin.ListSnapshotsRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - ~.bigtable_table_admin.ListSnapshotsResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{parent=projects/*/instances/*/clusters/*}/snapshots', - }, - ] - request, metadata = self._interceptor.pre_list_snapshots(request, metadata) - pb_request = bigtable_table_admin.ListSnapshotsRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.ListSnapshotsResponse() - pb_resp = bigtable_table_admin.ListSnapshotsResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_snapshots(resp) - return resp - - class _ListTables(BigtableTableAdminRestStub): - def __hash__(self): - return hash("ListTables") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.ListTablesRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> bigtable_table_admin.ListTablesResponse: - r"""Call the list tables method over HTTP. - - Args: - request (~.bigtable_table_admin.ListTablesRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. 
- - Returns: - ~.bigtable_table_admin.ListTablesResponse: - Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'get', - 'uri': '/v2/{parent=projects/*/instances/*}/tables', - }, - ] - request, metadata = self._interceptor.pre_list_tables(request, metadata) - pb_request = bigtable_table_admin.ListTablesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = bigtable_table_admin.ListTablesResponse() - pb_resp = bigtable_table_admin.ListTablesResponse.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_list_tables(resp) - return resp - - class _ModifyColumnFamilies(BigtableTableAdminRestStub): - def __hash__(self): - return hash("ModifyColumnFamilies") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.ModifyColumnFamiliesRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> table.Table: - r"""Call the modify column families method over HTTP. - - Args: - request (~.bigtable_table_admin.ModifyColumnFamiliesRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.table.Table: - A collection of user data indexed by - row, column, and timestamp. Each table - is served using the resources of its - parent cluster. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_modify_column_families(request, metadata) - pb_request = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Table() - pb_resp = table.Table.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_modify_column_families(resp) - return resp - - class _RestoreTable(BigtableTableAdminRestStub): - def __hash__(self): - return hash("RestoreTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.RestoreTableRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the restore table method over HTTP. - - Args: - request (~.bigtable_table_admin.RestoreTableRequest): - The request object. The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{parent=projects/*/instances/*}/tables:restore', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_restore_table(request, metadata) - pb_request = bigtable_table_admin.RestoreTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_restore_table(resp) - return resp - - class _SetIamPolicy(BigtableTableAdminRestStub): - def __hash__(self): - return hash("SetIamPolicy") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: iam_policy_pb2.SetIamPolicyRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> policy_pb2.Policy: - r"""Call the set iam policy method over HTTP. - - Args: - request (~.iam_policy_pb2.SetIamPolicyRequest): - The request object. Request message for ``SetIamPolicy`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.policy_pb2.Policy: - An Identity and Access Management (IAM) policy, which - specifies access controls for Google Cloud resources. - - A ``Policy`` is a collection of ``bindings``. A - ``binding`` binds one or more ``members``, or - principals, to a single ``role``. Principals can be user - accounts, service accounts, Google groups, and domains - (such as G Suite). A ``role`` is a named list of - permissions; each ``role`` can be an IAM predefined role - or a user-created custom role. - - For some types of Google Cloud resources, a ``binding`` - can also specify a ``condition``, which is a logical - expression that allows access to a resource only if the - expression evaluates to ``true``. 
A condition can add - constraints based on attributes of the request, the - resource, or both. To learn which resources support - conditions in their IAM policies, see the `IAM - documentation `__. - - **JSON example:** - - :: - - { - "bindings": [ - { - "role": "roles/resourcemanager.organizationAdmin", - "members": [ - "user:mike@example.com", - "group:admins@example.com", - "domain:google.com", - "serviceAccount:my-project-id@appspot.gserviceaccount.com" - ] - }, - { - "role": "roles/resourcemanager.organizationViewer", - "members": [ - "user:eve@example.com" - ], - "condition": { - "title": "expirable access", - "description": "Does not grant access after Sep 2020", - "expression": "request.time < - timestamp('2020-10-01T00:00:00.000Z')", - } - } - ], - "etag": "BwWWja0YfJA=", - "version": 3 - } - - **YAML example:** - - :: - - bindings: - - members: - - user:mike@example.com - - group:admins@example.com - - domain:google.com - - serviceAccount:my-project-id@appspot.gserviceaccount.com - role: roles/resourcemanager.organizationAdmin - - members: - - user:eve@example.com - role: roles/resourcemanager.organizationViewer - condition: - title: expirable access - description: Does not grant access after Sep 2020 - expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 - - For a description of IAM and its features, see the `IAM - documentation `__. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy', - 'body': '*', - }, -{ - 'method': 'post', - 'uri': '/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:setIamPolicy', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_set_iam_policy(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = policy_pb2.Policy() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_set_iam_policy(resp) - return resp - - class _SnapshotTable(BigtableTableAdminRestStub): - def __hash__(self): - return hash("SnapshotTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.SnapshotTableRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the snapshot table method over HTTP. - - Args: - request (~.bigtable_table_admin.SnapshotTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to - most Cloud Bigtable customers. This feature might be - changed in backward-incompatible ways and is not - recommended for production use. It is not subject to any - SLA or deprecation policy. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{name=projects/*/instances/*/tables/*}:snapshot', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_snapshot_table(request, metadata) - pb_request = bigtable_table_admin.SnapshotTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_snapshot_table(resp) - return resp - - class _TestIamPermissions(BigtableTableAdminRestStub): - def __hash__(self): - return hash("TestIamPermissions") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: iam_policy_pb2.TestIamPermissionsRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> iam_policy_pb2.TestIamPermissionsResponse: - r"""Call the test iam permissions method over HTTP. - - Args: - request (~.iam_policy_pb2.TestIamPermissionsRequest): - The request object. Request message for ``TestIamPermissions`` method. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.iam_policy_pb2.TestIamPermissionsResponse: - Response message for ``TestIamPermissions`` method. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions', - 'body': '*', - }, -{ - 'method': 'post', - 'uri': '/v2/{resource=projects/*/instances/*/clusters/*/backups/*}:testIamPermissions', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_test_iam_permissions(request, metadata) - pb_request = request - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = iam_policy_pb2.TestIamPermissionsResponse() - pb_resp = resp - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_test_iam_permissions(resp) - return resp - - class _UndeleteTable(BigtableTableAdminRestStub): - def __hash__(self): - return hash("UndeleteTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.UndeleteTableRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the undelete table method over HTTP. - - Args: - request (~.bigtable_table_admin.UndeleteTableRequest): - The request object. Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'post', - 'uri': '/v2/{name=projects/*/instances/*/tables/*}:undelete', - 'body': '*', - }, - ] - request, metadata = self._interceptor.pre_undelete_table(request, metadata) - pb_request = bigtable_table_admin.UndeleteTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_undelete_table(resp) - return resp - - class _UpdateBackup(BigtableTableAdminRestStub): - def __hash__(self): - return hash("UpdateBackup") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask" : {}, } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.UpdateBackupRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> table.Backup: - r"""Call the update backup method over HTTP. - - Args: - request (~.bigtable_table_admin.UpdateBackupRequest): - The request object. The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.table.Backup: - A backup of a Cloud Bigtable table. 
- """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}', - 'body': 'backup', - }, - ] - request, metadata = self._interceptor.pre_update_backup(request, metadata) - pb_request = bigtable_table_admin.UpdateBackupRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. 
- if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = table.Backup() - pb_resp = table.Backup.pb(resp) - - json_format.Parse(response.content, pb_resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_backup(resp) - return resp - - class _UpdateTable(BigtableTableAdminRestStub): - def __hash__(self): - return hash("UpdateTable") - - __REQUIRED_FIELDS_DEFAULT_VALUES: Dict[str, Any] = { - "updateMask" : {}, } - - @classmethod - def _get_unset_required_fields(cls, message_dict): - return {k: v for k, v in cls.__REQUIRED_FIELDS_DEFAULT_VALUES.items() if k not in message_dict} - - def __call__(self, - request: bigtable_table_admin.UpdateTableRequest, *, - retry: OptionalRetry=gapic_v1.method.DEFAULT, - timeout: Optional[float]=None, - metadata: Sequence[Tuple[str, str]]=(), - ) -> operations_pb2.Operation: - r"""Call the update table method over HTTP. - - Args: - request (~.bigtable_table_admin.UpdateTableRequest): - The request object. The request for - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - retry (google.api_core.retry.Retry): Designation of what errors, if any, - should be retried. - timeout (float): The timeout for this request. - metadata (Sequence[Tuple[str, str]]): Strings which should be - sent along with the request as metadata. - - Returns: - ~.operations_pb2.Operation: - This resource represents a - long-running operation that is the - result of a network API call. 
- - """ - - http_options: List[Dict[str, str]] = [{ - 'method': 'patch', - 'uri': '/v2/{table.name=projects/*/instances/*/tables/*}', - 'body': 'table', - }, - ] - request, metadata = self._interceptor.pre_update_table(request, metadata) - pb_request = bigtable_table_admin.UpdateTableRequest.pb(request) - transcoded_request = path_template.transcode(http_options, pb_request) - - # Jsonify the request body - - body = json_format.MessageToJson( - transcoded_request['body'], - including_default_value_fields=False, - use_integers_for_enums=True - ) - uri = transcoded_request['uri'] - method = transcoded_request['method'] - - # Jsonify the query params - query_params = json.loads(json_format.MessageToJson( - transcoded_request['query_params'], - including_default_value_fields=False, - use_integers_for_enums=True, - )) - query_params.update(self._get_unset_required_fields(query_params)) - - query_params["$alt"] = "json;enum-encoding=int" - - # Send the request - headers = dict(metadata) - headers['Content-Type'] = 'application/json' - response = getattr(self._session, method)( - "{host}{uri}".format(host=self._host, uri=uri), - timeout=timeout, - headers=headers, - params=rest_helpers.flatten_query_params(query_params, strict=True), - data=body, - ) - - # In case of error, raise the appropriate core_exceptions.GoogleAPICallError exception - # subclass. - if response.status_code >= 400: - raise core_exceptions.from_http_response(response) - - # Return the response - resp = operations_pb2.Operation() - json_format.Parse(response.content, resp, ignore_unknown_fields=True) - resp = self._interceptor.post_update_table(resp) - return resp - - @property - def check_consistency(self) -> Callable[ - [bigtable_table_admin.CheckConsistencyRequest], - bigtable_table_admin.CheckConsistencyResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._CheckConsistency(self._session, self._host, self._interceptor) # type: ignore - - @property - def copy_backup(self) -> Callable[ - [bigtable_table_admin.CopyBackupRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CopyBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_backup(self) -> Callable[ - [bigtable_table_admin.CreateBackupRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_table(self) -> Callable[ - [bigtable_table_admin.CreateTableRequest], - gba_table.Table]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def create_table_from_snapshot(self) -> Callable[ - [bigtable_table_admin.CreateTableFromSnapshotRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._CreateTableFromSnapshot(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_backup(self) -> Callable[ - [bigtable_table_admin.DeleteBackupRequest], - empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._DeleteBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_snapshot(self) -> Callable[ - [bigtable_table_admin.DeleteSnapshotRequest], - empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteSnapshot(self._session, self._host, self._interceptor) # type: ignore - - @property - def delete_table(self) -> Callable[ - [bigtable_table_admin.DeleteTableRequest], - empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DeleteTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def drop_row_range(self) -> Callable[ - [bigtable_table_admin.DropRowRangeRequest], - empty_pb2.Empty]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._DropRowRange(self._session, self._host, self._interceptor) # type: ignore - - @property - def generate_consistency_token(self) -> Callable[ - [bigtable_table_admin.GenerateConsistencyTokenRequest], - bigtable_table_admin.GenerateConsistencyTokenResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GenerateConsistencyToken(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_backup(self) -> Callable[ - [bigtable_table_admin.GetBackupRequest], - table.Backup]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._GetBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_iam_policy(self) -> Callable[ - [iam_policy_pb2.GetIamPolicyRequest], - policy_pb2.Policy]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetIamPolicy(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_snapshot(self) -> Callable[ - [bigtable_table_admin.GetSnapshotRequest], - table.Snapshot]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetSnapshot(self._session, self._host, self._interceptor) # type: ignore - - @property - def get_table(self) -> Callable[ - [bigtable_table_admin.GetTableRequest], - table.Table]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._GetTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_backups(self) -> Callable[ - [bigtable_table_admin.ListBackupsRequest], - bigtable_table_admin.ListBackupsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListBackups(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_snapshots(self) -> Callable[ - [bigtable_table_admin.ListSnapshotsRequest], - bigtable_table_admin.ListSnapshotsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._ListSnapshots(self._session, self._host, self._interceptor) # type: ignore - - @property - def list_tables(self) -> Callable[ - [bigtable_table_admin.ListTablesRequest], - bigtable_table_admin.ListTablesResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ListTables(self._session, self._host, self._interceptor) # type: ignore - - @property - def modify_column_families(self) -> Callable[ - [bigtable_table_admin.ModifyColumnFamiliesRequest], - table.Table]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._ModifyColumnFamilies(self._session, self._host, self._interceptor) # type: ignore - - @property - def restore_table(self) -> Callable[ - [bigtable_table_admin.RestoreTableRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._RestoreTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def set_iam_policy(self) -> Callable[ - [iam_policy_pb2.SetIamPolicyRequest], - policy_pb2.Policy]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._SetIamPolicy(self._session, self._host, self._interceptor) # type: ignore - - @property - def snapshot_table(self) -> Callable[ - [bigtable_table_admin.SnapshotTableRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._SnapshotTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def test_iam_permissions(self) -> Callable[ - [iam_policy_pb2.TestIamPermissionsRequest], - iam_policy_pb2.TestIamPermissionsResponse]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._TestIamPermissions(self._session, self._host, self._interceptor) # type: ignore - - @property - def undelete_table(self) -> Callable[ - [bigtable_table_admin.UndeleteTableRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UndeleteTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_backup(self) -> Callable[ - [bigtable_table_admin.UpdateBackupRequest], - table.Backup]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. - # In C++ this would require a dynamic_cast - return self._UpdateBackup(self._session, self._host, self._interceptor) # type: ignore - - @property - def update_table(self) -> Callable[ - [bigtable_table_admin.UpdateTableRequest], - operations_pb2.Operation]: - # The return type is fine, but mypy isn't sophisticated enough to determine what's going on here. 
- # In C++ this would require a dynamic_cast - return self._UpdateTable(self._session, self._host, self._interceptor) # type: ignore - - @property - def kind(self) -> str: - return "rest" - - def close(self): - self._session.close() - - -__all__=( - 'BigtableTableAdminRestTransport', -) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/__init__.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/__init__.py deleted file mode 100644 index 2ed192c75..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/__init__.py +++ /dev/null @@ -1,186 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from .bigtable_instance_admin import ( - CreateAppProfileRequest, - CreateClusterMetadata, - CreateClusterRequest, - CreateInstanceMetadata, - CreateInstanceRequest, - DeleteAppProfileRequest, - DeleteClusterRequest, - DeleteInstanceRequest, - GetAppProfileRequest, - GetClusterRequest, - GetInstanceRequest, - ListAppProfilesRequest, - ListAppProfilesResponse, - ListClustersRequest, - ListClustersResponse, - ListHotTabletsRequest, - ListHotTabletsResponse, - ListInstancesRequest, - ListInstancesResponse, - PartialUpdateClusterMetadata, - PartialUpdateClusterRequest, - PartialUpdateInstanceRequest, - UpdateAppProfileMetadata, - UpdateAppProfileRequest, - UpdateClusterMetadata, - UpdateInstanceMetadata, -) -from .bigtable_table_admin import ( - CheckConsistencyRequest, - CheckConsistencyResponse, - CopyBackupMetadata, - CopyBackupRequest, - CreateBackupMetadata, - CreateBackupRequest, - CreateTableFromSnapshotMetadata, - CreateTableFromSnapshotRequest, - CreateTableRequest, - DeleteBackupRequest, - DeleteSnapshotRequest, - DeleteTableRequest, - DropRowRangeRequest, - GenerateConsistencyTokenRequest, - GenerateConsistencyTokenResponse, - GetBackupRequest, - GetSnapshotRequest, - GetTableRequest, - ListBackupsRequest, - ListBackupsResponse, - ListSnapshotsRequest, - ListSnapshotsResponse, - ListTablesRequest, - ListTablesResponse, - ModifyColumnFamiliesRequest, - OptimizeRestoredTableMetadata, - RestoreTableMetadata, - RestoreTableRequest, - SnapshotTableMetadata, - SnapshotTableRequest, - UndeleteTableMetadata, - UndeleteTableRequest, - UpdateBackupRequest, - UpdateTableMetadata, - UpdateTableRequest, -) -from .common import ( - OperationProgress, - StorageType, -) -from .instance import ( - AppProfile, - AutoscalingLimits, - AutoscalingTargets, - Cluster, - HotTablet, - Instance, -) -from .table import ( - Backup, - BackupInfo, - ChangeStreamConfig, - ColumnFamily, - EncryptionInfo, - GcRule, - RestoreInfo, - Snapshot, - Table, - RestoreSourceType, -) - -__all__ = 
( - 'CreateAppProfileRequest', - 'CreateClusterMetadata', - 'CreateClusterRequest', - 'CreateInstanceMetadata', - 'CreateInstanceRequest', - 'DeleteAppProfileRequest', - 'DeleteClusterRequest', - 'DeleteInstanceRequest', - 'GetAppProfileRequest', - 'GetClusterRequest', - 'GetInstanceRequest', - 'ListAppProfilesRequest', - 'ListAppProfilesResponse', - 'ListClustersRequest', - 'ListClustersResponse', - 'ListHotTabletsRequest', - 'ListHotTabletsResponse', - 'ListInstancesRequest', - 'ListInstancesResponse', - 'PartialUpdateClusterMetadata', - 'PartialUpdateClusterRequest', - 'PartialUpdateInstanceRequest', - 'UpdateAppProfileMetadata', - 'UpdateAppProfileRequest', - 'UpdateClusterMetadata', - 'UpdateInstanceMetadata', - 'CheckConsistencyRequest', - 'CheckConsistencyResponse', - 'CopyBackupMetadata', - 'CopyBackupRequest', - 'CreateBackupMetadata', - 'CreateBackupRequest', - 'CreateTableFromSnapshotMetadata', - 'CreateTableFromSnapshotRequest', - 'CreateTableRequest', - 'DeleteBackupRequest', - 'DeleteSnapshotRequest', - 'DeleteTableRequest', - 'DropRowRangeRequest', - 'GenerateConsistencyTokenRequest', - 'GenerateConsistencyTokenResponse', - 'GetBackupRequest', - 'GetSnapshotRequest', - 'GetTableRequest', - 'ListBackupsRequest', - 'ListBackupsResponse', - 'ListSnapshotsRequest', - 'ListSnapshotsResponse', - 'ListTablesRequest', - 'ListTablesResponse', - 'ModifyColumnFamiliesRequest', - 'OptimizeRestoredTableMetadata', - 'RestoreTableMetadata', - 'RestoreTableRequest', - 'SnapshotTableMetadata', - 'SnapshotTableRequest', - 'UndeleteTableMetadata', - 'UndeleteTableRequest', - 'UpdateBackupRequest', - 'UpdateTableMetadata', - 'UpdateTableRequest', - 'OperationProgress', - 'StorageType', - 'AppProfile', - 'AutoscalingLimits', - 'AutoscalingTargets', - 'Cluster', - 'HotTablet', - 'Instance', - 'Backup', - 'BackupInfo', - 'ChangeStreamConfig', - 'ColumnFamily', - 'EncryptionInfo', - 'GcRule', - 'RestoreInfo', - 'Snapshot', - 'Table', - 'RestoreSourceType', -) diff --git 
a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py deleted file mode 100644 index 0acfa10bb..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_instance_admin.py +++ /dev/null @@ -1,891 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.admin.v2', - manifest={ - 'CreateInstanceRequest', - 'GetInstanceRequest', - 'ListInstancesRequest', - 'ListInstancesResponse', - 'PartialUpdateInstanceRequest', - 'DeleteInstanceRequest', - 'CreateClusterRequest', - 'GetClusterRequest', - 'ListClustersRequest', - 'ListClustersResponse', - 'DeleteClusterRequest', - 'CreateInstanceMetadata', - 'UpdateInstanceMetadata', - 'CreateClusterMetadata', - 'UpdateClusterMetadata', - 'PartialUpdateClusterMetadata', - 'PartialUpdateClusterRequest', - 'CreateAppProfileRequest', - 'GetAppProfileRequest', - 'ListAppProfilesRequest', - 'ListAppProfilesResponse', - 
'UpdateAppProfileRequest', - 'DeleteAppProfileRequest', - 'UpdateAppProfileMetadata', - 'ListHotTabletsRequest', - 'ListHotTabletsResponse', - }, -) - - -class CreateInstanceRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.CreateInstance. - - Attributes: - parent (str): - Required. The unique name of the project in which to create - the new instance. Values are of the form - ``projects/{project}``. - instance_id (str): - Required. The ID to be used when referring to the new - instance within its project, e.g., just ``myinstance`` - rather than ``projects/myproject/instances/myinstance``. - instance (google.cloud.bigtable_admin_v2.types.Instance): - Required. The instance to create. Fields marked - ``OutputOnly`` must be left blank. - clusters (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Cluster]): - Required. The clusters to be created within the instance, - mapped by desired cluster ID, e.g., just ``mycluster`` - rather than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - Fields marked ``OutputOnly`` must be left blank. Currently, - at most four clusters can be specified. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - instance_id: str = proto.Field( - proto.STRING, - number=2, - ) - instance: gba_instance.Instance = proto.Field( - proto.MESSAGE, - number=3, - message=gba_instance.Instance, - ) - clusters: MutableMapping[str, gba_instance.Cluster] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=4, - message=gba_instance.Cluster, - ) - - -class GetInstanceRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.GetInstance. - - Attributes: - name (str): - Required. The unique name of the requested instance. Values - are of the form ``projects/{project}/instances/{instance}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListInstancesRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.ListInstances. 
- - Attributes: - parent (str): - Required. The unique name of the project for which a list of - instances is requested. Values are of the form - ``projects/{project}``. - page_token (str): - DEPRECATED: This field is unused and ignored. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class ListInstancesResponse(proto.Message): - r"""Response message for BigtableInstanceAdmin.ListInstances. - - Attributes: - instances (MutableSequence[google.cloud.bigtable_admin_v2.types.Instance]): - The list of requested instances. - failed_locations (MutableSequence[str]): - Locations from which Instance information could not be - retrieved, due to an outage or some other transient - condition. Instances whose Clusters are all in one of the - failed locations may be missing from ``instances``, and - Instances with at least one Cluster in a failed location may - only have partial information returned. Values are of the - form ``projects//locations/`` - next_page_token (str): - DEPRECATED: This field is unused and ignored. - """ - - @property - def raw_page(self): - return self - - instances: MutableSequence[gba_instance.Instance] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.Instance, - ) - failed_locations: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=2, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class PartialUpdateInstanceRequest(proto.Message): - r"""Request message for - BigtableInstanceAdmin.PartialUpdateInstance. - - Attributes: - instance (google.cloud.bigtable_admin_v2.types.Instance): - Required. The Instance which will (partially) - replace the current value. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of Instance fields which - should be replaced. Must be explicitly set. 
- """ - - instance: gba_instance.Instance = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.Instance, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class DeleteInstanceRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.DeleteInstance. - - Attributes: - name (str): - Required. The unique name of the instance to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateClusterRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.CreateCluster. - - Attributes: - parent (str): - Required. The unique name of the instance in which to create - the new cluster. Values are of the form - ``projects/{project}/instances/{instance}``. - cluster_id (str): - Required. The ID to be used when referring to the new - cluster within its instance, e.g., just ``mycluster`` rather - than - ``projects/myproject/instances/myinstance/clusters/mycluster``. - cluster (google.cloud.bigtable_admin_v2.types.Cluster): - Required. The cluster to be created. Fields marked - ``OutputOnly`` must be left blank. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - cluster_id: str = proto.Field( - proto.STRING, - number=2, - ) - cluster: gba_instance.Cluster = proto.Field( - proto.MESSAGE, - number=3, - message=gba_instance.Cluster, - ) - - -class GetClusterRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.GetCluster. - - Attributes: - name (str): - Required. The unique name of the requested cluster. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListClustersRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.ListClusters. - - Attributes: - parent (str): - Required. 
The unique name of the instance for which a list - of clusters is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list Clusters for all Instances in a - project, e.g., ``projects/myproject/instances/-``. - page_token (str): - DEPRECATED: This field is unused and ignored. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class ListClustersResponse(proto.Message): - r"""Response message for BigtableInstanceAdmin.ListClusters. - - Attributes: - clusters (MutableSequence[google.cloud.bigtable_admin_v2.types.Cluster]): - The list of requested clusters. - failed_locations (MutableSequence[str]): - Locations from which Cluster information could not be - retrieved, due to an outage or some other transient - condition. Clusters from these locations may be missing from - ``clusters``, or may only have partial information returned. - Values are of the form - ``projects//locations/`` - next_page_token (str): - DEPRECATED: This field is unused and ignored. - """ - - @property - def raw_page(self): - return self - - clusters: MutableSequence[gba_instance.Cluster] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.Cluster, - ) - failed_locations: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=2, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class DeleteClusterRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.DeleteCluster. - - Attributes: - name (str): - Required. The unique name of the cluster to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class CreateInstanceMetadata(proto.Message): - r"""The metadata for the Operation returned by CreateInstance. 
- - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.CreateInstanceRequest): - The request that prompted the initiation of - this CreateInstance operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: 'CreateInstanceRequest' = proto.Field( - proto.MESSAGE, - number=1, - message='CreateInstanceRequest', - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class UpdateInstanceMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateInstance. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateInstanceRequest): - The request that prompted the initiation of - this UpdateInstance operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: 'PartialUpdateInstanceRequest' = proto.Field( - proto.MESSAGE, - number=1, - message='PartialUpdateInstanceRequest', - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class CreateClusterMetadata(proto.Message): - r"""The metadata for the Operation returned by CreateCluster. 
- - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.CreateClusterRequest): - The request that prompted the initiation of - this CreateCluster operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - tables (MutableMapping[str, google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress]): - Keys: the full ``name`` of each table that existed in the - instance when CreateCluster was first called, i.e. - ``projects//instances//tables/
``. - Any table added to the instance by a later API call will be - created in the new cluster by that API call, not this one. - - Values: information on how much of a table's data has been - copied to the newly-created cluster so far. - """ - - class TableProgress(proto.Message): - r"""Progress info for copying a table's data to the new cluster. - - Attributes: - estimated_size_bytes (int): - Estimate of the size of the table to be - copied. - estimated_copied_bytes (int): - Estimate of the number of bytes copied so far for this - table. This will eventually reach 'estimated_size_bytes' - unless the table copy is CANCELLED. - state (google.cloud.bigtable_admin_v2.types.CreateClusterMetadata.TableProgress.State): - - """ - class State(proto.Enum): - r""" - - Values: - STATE_UNSPECIFIED (0): - No description available. - PENDING (1): - The table has not yet begun copying to the - new cluster. - COPYING (2): - The table is actively being copied to the new - cluster. - COMPLETED (3): - The table has been fully copied to the new - cluster. - CANCELLED (4): - The table was deleted before it finished - copying to the new cluster. Note that tables - deleted after completion will stay marked as - COMPLETED, not CANCELLED. 
- """ - STATE_UNSPECIFIED = 0 - PENDING = 1 - COPYING = 2 - COMPLETED = 3 - CANCELLED = 4 - - estimated_size_bytes: int = proto.Field( - proto.INT64, - number=2, - ) - estimated_copied_bytes: int = proto.Field( - proto.INT64, - number=3, - ) - state: 'CreateClusterMetadata.TableProgress.State' = proto.Field( - proto.ENUM, - number=4, - enum='CreateClusterMetadata.TableProgress.State', - ) - - original_request: 'CreateClusterRequest' = proto.Field( - proto.MESSAGE, - number=1, - message='CreateClusterRequest', - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - tables: MutableMapping[str, TableProgress] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=4, - message=TableProgress, - ) - - -class UpdateClusterMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateCluster. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.Cluster): - The request that prompted the initiation of - this UpdateCluster operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: gba_instance.Cluster = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.Cluster, - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class PartialUpdateClusterMetadata(proto.Message): - r"""The metadata for the Operation returned by - PartialUpdateCluster. 
- - Attributes: - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - original_request (google.cloud.bigtable_admin_v2.types.PartialUpdateClusterRequest): - The original request for - PartialUpdateCluster. - """ - - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=1, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - original_request: 'PartialUpdateClusterRequest' = proto.Field( - proto.MESSAGE, - number=3, - message='PartialUpdateClusterRequest', - ) - - -class PartialUpdateClusterRequest(proto.Message): - r"""Request message for - BigtableInstanceAdmin.PartialUpdateCluster. - - Attributes: - cluster (google.cloud.bigtable_admin_v2.types.Cluster): - Required. The Cluster which contains the partial updates to - be applied, subject to the update_mask. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of Cluster fields which - should be replaced. - """ - - cluster: gba_instance.Cluster = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.Cluster, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class CreateAppProfileRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.CreateAppProfile. - - Attributes: - parent (str): - Required. The unique name of the instance in which to create - the new app profile. Values are of the form - ``projects/{project}/instances/{instance}``. - app_profile_id (str): - Required. The ID to be used when referring to the new app - profile within its instance, e.g., just ``myprofile`` rather - than - ``projects/myproject/instances/myinstance/appProfiles/myprofile``. 
- app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): - Required. The app profile to be created. Fields marked - ``OutputOnly`` will be ignored. - ignore_warnings (bool): - If true, ignore safety checks when creating - the app profile. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - app_profile_id: str = proto.Field( - proto.STRING, - number=2, - ) - app_profile: gba_instance.AppProfile = proto.Field( - proto.MESSAGE, - number=3, - message=gba_instance.AppProfile, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=4, - ) - - -class GetAppProfileRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.GetAppProfile. - - Attributes: - name (str): - Required. The unique name of the requested app profile. - Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListAppProfilesRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - parent (str): - Required. The unique name of the instance for which a list - of app profiles is requested. Values are of the form - ``projects/{project}/instances/{instance}``. Use - ``{instance} = '-'`` to list AppProfiles for all Instances - in a project, e.g., ``projects/myproject/instances/-``. - page_size (int): - Maximum number of results per page. - - A page_size of zero lets the server choose the number of - items to return. A page_size which is strictly positive will - return at most that many items. A negative page_size will - cause an error. - - Following the first request, subsequent paginated calls are - not required to pass a page_size. If a page_size is set in - subsequent calls, it must match the page_size given in the - first request. - page_token (str): - The value of ``next_page_token`` returned by a previous - call. 
- """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_size: int = proto.Field( - proto.INT32, - number=3, - ) - page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class ListAppProfilesResponse(proto.Message): - r"""Response message for BigtableInstanceAdmin.ListAppProfiles. - - Attributes: - app_profiles (MutableSequence[google.cloud.bigtable_admin_v2.types.AppProfile]): - The list of requested app profiles. - next_page_token (str): - Set if not all app profiles could be returned in a single - response. Pass this value to ``page_token`` in another - request to get the next page of results. - failed_locations (MutableSequence[str]): - Locations from which AppProfile information could not be - retrieved, due to an outage or some other transient - condition. AppProfiles from these locations may be missing - from ``app_profiles``. Values are of the form - ``projects//locations/`` - """ - - @property - def raw_page(self): - return self - - app_profiles: MutableSequence[gba_instance.AppProfile] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.AppProfile, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - failed_locations: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=3, - ) - - -class UpdateAppProfileRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.UpdateAppProfile. - - Attributes: - app_profile (google.cloud.bigtable_admin_v2.types.AppProfile): - Required. The app profile which will - (partially) replace the current value. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The subset of app profile fields - which should be replaced. If unset, all fields - will be replaced. - ignore_warnings (bool): - If true, ignore safety checks when updating - the app profile. 
- """ - - app_profile: gba_instance.AppProfile = proto.Field( - proto.MESSAGE, - number=1, - message=gba_instance.AppProfile, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=3, - ) - - -class DeleteAppProfileRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.DeleteAppProfile. - - Attributes: - name (str): - Required. The unique name of the app profile to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/{app_profile}``. - ignore_warnings (bool): - Required. If true, ignore safety checks when - deleting the app profile. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=2, - ) - - -class UpdateAppProfileMetadata(proto.Message): - r"""The metadata for the Operation returned by UpdateAppProfile. - """ - - -class ListHotTabletsRequest(proto.Message): - r"""Request message for BigtableInstanceAdmin.ListHotTablets. - - Attributes: - parent (str): - Required. The cluster name to list hot tablets. Value is in - the following form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The start time to list hot tablets. The hot - tablets in the response will have start times - between the requested start time and end time. - Start time defaults to Now if it is unset, and - end time defaults to Now - 24 hours if it is - unset. The start time should be less than the - end time, and the maximum allowed time range - between start time and end time is 48 hours. - Start time and end time should have values - between Now and Now - 14 days. - end_time (google.protobuf.timestamp_pb2.Timestamp): - The end time to list hot tablets. - page_size (int): - Maximum number of results per page. 
- - A page_size that is empty or zero lets the server choose the - number of items to return. A page_size which is strictly - positive will return at most that many items. A negative - page_size will cause an error. - - Following the first request, subsequent paginated calls do - not need a page_size field. If a page_size is set in - subsequent calls, it must match the page_size given in the - first request. - page_token (str): - The value of ``next_page_token`` returned by a previous - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=5, - ) - - -class ListHotTabletsResponse(proto.Message): - r"""Response message for BigtableInstanceAdmin.ListHotTablets. - - Attributes: - hot_tablets (MutableSequence[google.cloud.bigtable_admin_v2.types.HotTablet]): - List of hot tablets in the tables of the - requested cluster that fall within the requested - time range. Hot tablets are ordered by node cpu - usage percent. If there are multiple hot tablets - that correspond to the same tablet within a - 15-minute interval, only the hot tablet with the - highest node cpu usage will be included in the - response. - next_page_token (str): - Set if not all hot tablets could be returned in a single - response. Pass this value to ``page_token`` in another - request to get the next page of results. 
- """ - - @property - def raw_page(self): - return self - - hot_tablets: MutableSequence[gba_instance.HotTablet] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_instance.HotTablet, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py deleted file mode 100644 index a8ca1da38..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/bigtable_table_admin.py +++ /dev/null @@ -1,1371 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.bigtable_admin_v2.types import common -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.admin.v2', - manifest={ - 'RestoreTableRequest', - 'RestoreTableMetadata', - 'OptimizeRestoredTableMetadata', - 'CreateTableRequest', - 'CreateTableFromSnapshotRequest', - 'DropRowRangeRequest', - 'ListTablesRequest', - 'ListTablesResponse', - 'GetTableRequest', - 'UpdateTableRequest', - 'UpdateTableMetadata', - 'DeleteTableRequest', - 'UndeleteTableRequest', - 'UndeleteTableMetadata', - 'ModifyColumnFamiliesRequest', - 'GenerateConsistencyTokenRequest', - 'GenerateConsistencyTokenResponse', - 'CheckConsistencyRequest', - 'CheckConsistencyResponse', - 'SnapshotTableRequest', - 'GetSnapshotRequest', - 'ListSnapshotsRequest', - 'ListSnapshotsResponse', - 'DeleteSnapshotRequest', - 'SnapshotTableMetadata', - 'CreateTableFromSnapshotMetadata', - 'CreateBackupRequest', - 'CreateBackupMetadata', - 'UpdateBackupRequest', - 'GetBackupRequest', - 'DeleteBackupRequest', - 'ListBackupsRequest', - 'ListBackupsResponse', - 'CopyBackupRequest', - 'CopyBackupMetadata', - }, -) - - -class RestoreTableRequest(proto.Message): - r"""The request for - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - parent (str): - Required. The name of the instance in which to create the - restored table. Values are of the form - ``projects//instances/``. - table_id (str): - Required. The id of the table to create and restore to. This - table must not already exist. 
The ``table_id`` appended to - ``parent`` forms the full table name of the form - ``projects//instances//tables/``. - backup (str): - Name of the backup from which to restore. Values are of the - form - ``projects//instances//clusters//backups/``. - - This field is a member of `oneof`_ ``source``. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - table_id: str = proto.Field( - proto.STRING, - number=2, - ) - backup: str = proto.Field( - proto.STRING, - number=3, - oneof='source', - ) - - -class RestoreTableMetadata(proto.Message): - r"""Metadata type for the long-running operation returned by - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - Name of the table being created and restored - to. - source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): - The type of the restore source. - backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): - - This field is a member of `oneof`_ ``source_info``. - optimize_table_operation_name (str): - If exists, the name of the long-running operation that will - be used to track the post-restore optimization process to - optimize the performance of the restored table. The metadata - type of the long-running operation is - [OptimizeRestoreTableMetadata][]. The response type is - [Empty][google.protobuf.Empty]. This long-running operation - may be automatically created by the system if applicable - after the RestoreTable long-running operation completes - successfully. This operation may not be created if the table - is already optimized or the restore was not successful. - progress (google.cloud.bigtable_admin_v2.types.OperationProgress): - The progress of the - [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] - operation. 
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_type: gba_table.RestoreSourceType = proto.Field( - proto.ENUM, - number=2, - enum=gba_table.RestoreSourceType, - ) - backup_info: gba_table.BackupInfo = proto.Field( - proto.MESSAGE, - number=3, - oneof='source_info', - message=gba_table.BackupInfo, - ) - optimize_table_operation_name: str = proto.Field( - proto.STRING, - number=4, - ) - progress: common.OperationProgress = proto.Field( - proto.MESSAGE, - number=5, - message=common.OperationProgress, - ) - - -class OptimizeRestoredTableMetadata(proto.Message): - r"""Metadata type for the long-running operation used to track - the progress of optimizations performed on a newly restored - table. This long-running operation is automatically created by - the system after the successful completion of a table restore, - and cannot be cancelled. - - Attributes: - name (str): - Name of the restored table being optimized. - progress (google.cloud.bigtable_admin_v2.types.OperationProgress): - The progress of the post-restore - optimizations. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - progress: common.OperationProgress = proto.Field( - proto.MESSAGE, - number=2, - message=common.OperationProgress, - ) - - -class CreateTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] - - Attributes: - parent (str): - Required. The unique name of the instance in which to create - the table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. Maximum 50 characters. - table (google.cloud.bigtable_admin_v2.types.Table): - Required. The Table to create. 
- initial_splits (MutableSequence[google.cloud.bigtable_admin_v2.types.CreateTableRequest.Split]): - The optional list of row keys that will be used to initially - split the table into several tablets (tablets are similar to - HBase regions). Given two split keys, ``s1`` and ``s2``, - three tablets will be created, spanning the key ranges: - ``[, s1), [s1, s2), [s2, )``. - - Example: - - - Row keys := - ``["a", "apple", "custom", "customer_1", "customer_2",`` - ``"other", "zz"]`` - - initial_split_keys := - ``["apple", "customer_1", "customer_2", "other"]`` - - Key assignment: - - - Tablet 1 ``[, apple) => {"a"}.`` - - Tablet 2 - ``[apple, customer_1) => {"apple", "custom"}.`` - - Tablet 3 - ``[customer_1, customer_2) => {"customer_1"}.`` - - Tablet 4 ``[customer_2, other) => {"customer_2"}.`` - - Tablet 5 ``[other, ) => {"other", "zz"}.`` - """ - - class Split(proto.Message): - r"""An initial split point for a newly created table. - - Attributes: - key (bytes): - Row key to use as an initial tablet boundary. - """ - - key: bytes = proto.Field( - proto.BYTES, - number=1, - ) - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - table_id: str = proto.Field( - proto.STRING, - number=2, - ) - table: gba_table.Table = proto.Field( - proto.MESSAGE, - number=3, - message=gba_table.Table, - ) - initial_splits: MutableSequence[Split] = proto.RepeatedField( - proto.MESSAGE, - number=4, - message=Split, - ) - - -class CreateTableFromSnapshotRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. 
- - Attributes: - parent (str): - Required. The unique name of the instance in which to create - the table. Values are of the form - ``projects/{project}/instances/{instance}``. - table_id (str): - Required. The name by which the new table should be referred - to within the parent instance, e.g., ``foobar`` rather than - ``{parent}/tables/foobar``. - source_snapshot (str): - Required. The unique name of the snapshot from which to - restore the table. The snapshot and the table must be in the - same instance. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - table_id: str = proto.Field( - proto.STRING, - number=2, - ) - source_snapshot: str = proto.Field( - proto.STRING, - number=3, - ) - - -class DropRowRangeRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - Required. The unique name of the table on which to drop a - range of rows. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - row_key_prefix (bytes): - Delete all rows that start with this row key - prefix. Prefix cannot be zero length. - - This field is a member of `oneof`_ ``target``. - delete_all_data_from_table (bool): - Delete all rows in the table. Setting this to - false is a no-op. - - This field is a member of `oneof`_ ``target``. 
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - row_key_prefix: bytes = proto.Field( - proto.BYTES, - number=2, - oneof='target', - ) - delete_all_data_from_table: bool = proto.Field( - proto.BOOL, - number=3, - oneof='target', - ) - - -class ListTablesRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - parent (str): - Required. The unique name of the instance for which tables - should be listed. Values are of the form - ``projects/{project}/instances/{instance}``. - view (google.cloud.bigtable_admin_v2.types.Table.View): - The view to be applied to the returned tables' fields. - NAME_ONLY view (default) and REPLICATION_VIEW are supported. - page_size (int): - Maximum number of results per page. - - A page_size of zero lets the server choose the number of - items to return. A page_size which is strictly positive will - return at most that many items. A negative page_size will - cause an error. - - Following the first request, subsequent paginated calls are - not required to pass a page_size. If a page_size is set in - subsequent calls, it must match the page_size given in the - first request. - page_token (str): - The value of ``next_page_token`` returned by a previous - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - view: gba_table.Table.View = proto.Field( - proto.ENUM, - number=2, - enum=gba_table.Table.View, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class ListTablesResponse(proto.Message): - r"""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListTables][google.bigtable.admin.v2.BigtableTableAdmin.ListTables] - - Attributes: - tables (MutableSequence[google.cloud.bigtable_admin_v2.types.Table]): - The tables present in the requested instance. 
- next_page_token (str): - Set if not all tables could be returned in a single - response. Pass this value to ``page_token`` in another - request to get the next page of results. - """ - - @property - def raw_page(self): - return self - - tables: MutableSequence[gba_table.Table] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_table.Table, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class GetTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetTable][google.bigtable.admin.v2.BigtableTableAdmin.GetTable] - - Attributes: - name (str): - Required. The unique name of the requested table. Values are - of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - view (google.cloud.bigtable_admin_v2.types.Table.View): - The view to be applied to the returned table's fields. - Defaults to ``SCHEMA_VIEW`` if unspecified. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - view: gba_table.Table.View = proto.Field( - proto.ENUM, - number=2, - enum=gba_table.Table.View, - ) - - -class UpdateTableRequest(proto.Message): - r"""The request for - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - - Attributes: - table (google.cloud.bigtable_admin_v2.types.Table): - Required. The table to update. The table's ``name`` field is - used to identify the table to update. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. The list of fields to update. A mask specifying - which fields (e.g. ``change_stream_config``) in the - ``table`` field should be updated. This mask is relative to - the ``table`` field, not to the request message. The - wildcard (*) path is currently not supported. 
Currently - UpdateTable is only supported for the following fields: - - - ``change_stream_config`` - - ``change_stream_config.retention_period`` - - ``deletion_protection`` - - If ``column_families`` is set in ``update_mask``, it will - return an UNIMPLEMENTED error. - """ - - table: gba_table.Table = proto.Field( - proto.MESSAGE, - number=1, - message=gba_table.Table, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class UpdateTableMetadata(proto.Message): - r"""Metadata type for the operation returned by - [UpdateTable][google.bigtable.admin.v2.BigtableTableAdmin.UpdateTable]. - - Attributes: - name (str): - The name of the table being updated. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was canceled. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class DeleteTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable] - - Attributes: - name (str): - Required. The unique name of the table to be deleted. Values - are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class UndeleteTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable] - - Attributes: - name (str): - Required. The unique name of the table to be restored. 
- Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class UndeleteTableMetadata(proto.Message): - r"""Metadata type for the operation returned by - [google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable][google.bigtable.admin.v2.BigtableTableAdmin.UndeleteTable]. - - Attributes: - name (str): - The name of the table being restored. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was cancelled. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class ModifyColumnFamiliesRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies][google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies] - - Attributes: - name (str): - Required. The unique name of the table whose families should - be modified. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - modifications (MutableSequence[google.cloud.bigtable_admin_v2.types.ModifyColumnFamiliesRequest.Modification]): - Required. Modifications to be atomically - applied to the specified table's families. - Entries are applied in order, meaning that - earlier modifications can be masked by later - ones (in the case of repeated updates to the - same family, for example). - ignore_warnings (bool): - Optional. If true, ignore safety checks when - modifying the column families. - """ - - class Modification(proto.Message): - r"""A create, update, or delete of a particular column family. 
- - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - id (str): - The ID of the column family to be modified. - create (google.cloud.bigtable_admin_v2.types.ColumnFamily): - Create a new column family with the specified - schema, or fail if one already exists with the - given ID. - - This field is a member of `oneof`_ ``mod``. - update (google.cloud.bigtable_admin_v2.types.ColumnFamily): - Update an existing column family to the - specified schema, or fail if no column family - exists with the given ID. - - This field is a member of `oneof`_ ``mod``. - drop (bool): - Drop (delete) the column family with the - given ID, or fail if no such family exists. - - This field is a member of `oneof`_ ``mod``. - """ - - id: str = proto.Field( - proto.STRING, - number=1, - ) - create: gba_table.ColumnFamily = proto.Field( - proto.MESSAGE, - number=2, - oneof='mod', - message=gba_table.ColumnFamily, - ) - update: gba_table.ColumnFamily = proto.Field( - proto.MESSAGE, - number=3, - oneof='mod', - message=gba_table.ColumnFamily, - ) - drop: bool = proto.Field( - proto.BOOL, - number=4, - oneof='mod', - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - modifications: MutableSequence[Modification] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message=Modification, - ) - ignore_warnings: bool = proto.Field( - proto.BOOL, - number=3, - ) - - -class GenerateConsistencyTokenRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - Attributes: - name (str): - Required. The unique name of the Table for which to create a - consistency token. 
Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class GenerateConsistencyTokenResponse(proto.Message): - r"""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken][google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken] - - Attributes: - consistency_token (str): - The generated consistency token. - """ - - consistency_token: str = proto.Field( - proto.STRING, - number=1, - ) - - -class CheckConsistencyRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - Attributes: - name (str): - Required. The unique name of the Table for which to check - replication consistency. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - consistency_token (str): - Required. The token created using - GenerateConsistencyToken for the Table. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - consistency_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class CheckConsistencyResponse(proto.Message): - r"""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency][google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency] - - Attributes: - consistent (bool): - True only if the token is consistent. A token - is consistent if replication has caught up with - the restrictions specified in the request. - """ - - consistent: bool = proto.Field( - proto.BOOL, - number=1, - ) - - -class SnapshotTableRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable][google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. 
This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - name (str): - Required. The unique name of the table to have the snapshot - taken. Values are of the form - ``projects/{project}/instances/{instance}/tables/{table}``. - cluster (str): - Required. The name of the cluster where the snapshot will be - created in. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - snapshot_id (str): - Required. The ID by which the new snapshot should be - referred to within the parent cluster, e.g., ``mysnapshot`` - of the form: ``[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` rather than - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot``. - ttl (google.protobuf.duration_pb2.Duration): - The amount of time that the new snapshot can - stay active after it is created. Once 'ttl' - expires, the snapshot will get deleted. The - maximum amount of time a snapshot can stay - active is 7 days. If 'ttl' is not specified, the - default value of 24 hours will be used. - description (str): - Description of the snapshot. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - cluster: str = proto.Field( - proto.STRING, - number=2, - ) - snapshot_id: str = proto.Field( - proto.STRING, - number=3, - ) - ttl: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=4, - message=duration_pb2.Duration, - ) - description: str = proto.Field( - proto.STRING, - number=5, - ) - - -class GetSnapshotRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. 
This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - name (str): - Required. The unique name of the requested snapshot. Values - are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListSnapshotsRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - parent (str): - Required. The unique name of the cluster for which snapshots - should be listed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list snapshots for all clusters - in an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - page_size (int): - The maximum number of snapshots to return per - page. CURRENTLY UNIMPLEMENTED AND IGNORED. - page_token (str): - The value of ``next_page_token`` returned by a previous - call. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - page_size: int = proto.Field( - proto.INT32, - number=2, - ) - page_token: str = proto.Field( - proto.STRING, - number=3, - ) - - -class ListSnapshotsResponse(proto.Message): - r"""Response message for - [google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots][google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots] - - Note: This is a private alpha release of Cloud Bigtable snapshots. 
- This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - snapshots (MutableSequence[google.cloud.bigtable_admin_v2.types.Snapshot]): - The snapshots present in the requested - cluster. - next_page_token (str): - Set if not all snapshots could be returned in a single - response. Pass this value to ``page_token`` in another - request to get the next page of results. - """ - - @property - def raw_page(self): - return self - - snapshots: MutableSequence[gba_table.Snapshot] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_table.Snapshot, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class DeleteSnapshotRequest(proto.Message): - r"""Request message for - [google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot] - - Note: This is a private alpha release of Cloud Bigtable snapshots. - This feature is not currently available to most Cloud Bigtable - customers. This feature might be changed in backward-incompatible - ways and is not recommended for production use. It is not subject to - any SLA or deprecation policy. - - Attributes: - name (str): - Required. The unique name of the snapshot to be deleted. - Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class SnapshotTableMetadata(proto.Message): - r"""The metadata for the Operation returned by SnapshotTable. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to most Cloud - Bigtable customers. This feature might be changed in - backward-incompatible ways and is not recommended for production - use. 
It is not subject to any SLA or deprecation policy. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.SnapshotTableRequest): - The request that prompted the initiation of - this SnapshotTable operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. - """ - - original_request: 'SnapshotTableRequest' = proto.Field( - proto.MESSAGE, - number=1, - message='SnapshotTableRequest', - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class CreateTableFromSnapshotMetadata(proto.Message): - r"""The metadata for the Operation returned by - CreateTableFromSnapshot. - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to most Cloud - Bigtable customers. This feature might be changed in - backward-incompatible ways and is not recommended for production - use. It is not subject to any SLA or deprecation policy. - - Attributes: - original_request (google.cloud.bigtable_admin_v2.types.CreateTableFromSnapshotRequest): - The request that prompted the initiation of - this CreateTableFromSnapshot operation. - request_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the original request was - received. - finish_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which the operation failed or was - completed successfully. 
- """ - - original_request: 'CreateTableFromSnapshotRequest' = proto.Field( - proto.MESSAGE, - number=1, - message='CreateTableFromSnapshotRequest', - ) - request_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - finish_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -class CreateBackupRequest(proto.Message): - r"""The request for - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - - Attributes: - parent (str): - Required. This must be one of the clusters in the instance - in which this table is located. The backup will be stored in - this cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - backup_id (str): - Required. The id of the backup to be created. The - ``backup_id`` along with the parent ``parent`` are combined - as {parent}/backups/{backup_id} to create the full backup - name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in length - and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to create. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - backup_id: str = proto.Field( - proto.STRING, - number=2, - ) - backup: gba_table.Backup = proto.Field( - proto.MESSAGE, - number=3, - message=gba_table.Backup, - ) - - -class CreateBackupMetadata(proto.Message): - r"""Metadata type for the operation returned by - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. - - Attributes: - name (str): - The name of the backup being created. - source_table (str): - The name of the table the backup is created - from. - start_time (google.protobuf.timestamp_pb2.Timestamp): - The time at which this operation started. 
- end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - finished or was cancelled. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_table: str = proto.Field( - proto.STRING, - number=2, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class UpdateBackupRequest(proto.Message): - r"""The request for - [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. - - Attributes: - backup (google.cloud.bigtable_admin_v2.types.Backup): - Required. The backup to update. ``backup.name``, and the - fields to be updated as specified by ``update_mask`` are - required. Other fields are ignored. Update is only supported - for the following fields: - - - ``backup.expire_time``. - update_mask (google.protobuf.field_mask_pb2.FieldMask): - Required. A mask specifying which fields (e.g. - ``expire_time``) in the Backup resource should be updated. - This mask is relative to the Backup resource, not to the - request message. The field mask must always be specified; - this prevents any future fields from being erased - accidentally by clients that do not know about them. - """ - - backup: gba_table.Backup = proto.Field( - proto.MESSAGE, - number=1, - message=gba_table.Backup, - ) - update_mask: field_mask_pb2.FieldMask = proto.Field( - proto.MESSAGE, - number=2, - message=field_mask_pb2.FieldMask, - ) - - -class GetBackupRequest(proto.Message): - r"""The request for - [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. - - Attributes: - name (str): - Required. Name of the backup. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. 
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class DeleteBackupRequest(proto.Message): - r"""The request for - [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. - - Attributes: - name (str): - Required. Name of the backup to delete. Values are of the - form - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}``. - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - - -class ListBackupsRequest(proto.Message): - r"""The request for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Attributes: - parent (str): - Required. The cluster to list backups from. Values are of - the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - Use ``{cluster} = '-'`` to list backups for all clusters in - an instance, e.g., - ``projects/{project}/instances/{instance}/clusters/-``. - filter (str): - A filter expression that filters backups listed in the - response. The expression must specify the field name, a - comparison operator, and the value that you want to use for - filtering. The value must be a string, a number, or a - boolean. The comparison operator must be <, >, <=, >=, !=, - =, or :. Colon ':' represents a HAS operator which is - roughly synonymous with equality. Filter rules are case - insensitive. - - The fields eligible for filtering are: - - - ``name`` - - ``source_table`` - - ``state`` - - ``start_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``end_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``expire_time`` (and values are of the format - YYYY-MM-DDTHH:MM:SSZ) - - ``size_bytes`` - - To filter on multiple expressions, provide each separate - expression within parentheses. By default, each expression - is an AND expression. However, you can include AND, OR, and - NOT expressions explicitly. 
- - Some examples of using filters are: - - - ``name:"exact"`` --> The backup's name is the string - "exact". - - ``name:howl`` --> The backup's name contains the string - "howl". - - ``source_table:prod`` --> The source_table's name - contains the string "prod". - - ``state:CREATING`` --> The backup is pending creation. - - ``state:READY`` --> The backup is fully created and ready - for use. - - ``(name:howl) AND (start_time < \"2018-03-28T14:50:00Z\")`` - --> The backup name contains the string "howl" and - start_time of the backup is before 2018-03-28T14:50:00Z. - - ``size_bytes > 10000000000`` --> The backup's size is - greater than 10GB - order_by (str): - An expression for specifying the sort order of the results - of the request. The string value should specify one or more - fields in [Backup][google.bigtable.admin.v2.Backup]. The - full syntax is described at https://aip.dev/132#ordering. - - Fields supported are: - - - name - - source_table - - expire_time - - start_time - - end_time - - size_bytes - - state - - For example, "start_time". The default sorting order is - ascending. To specify descending order for the field, a - suffix " desc" should be appended to the field name. For - example, "start_time desc". Redundant space characters in - the syntax are insigificant. - - If order_by is empty, results will be sorted by - ``start_time`` in descending order starting from the most - recently created backup. - page_size (int): - Number of backups to be returned in the - response. If 0 or less, defaults to the server's - maximum allowed page size. - page_token (str): - If non-empty, ``page_token`` should contain a - [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] - from a previous - [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] - to the same ``parent`` and with the same ``filter``. 
- """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - filter: str = proto.Field( - proto.STRING, - number=2, - ) - order_by: str = proto.Field( - proto.STRING, - number=3, - ) - page_size: int = proto.Field( - proto.INT32, - number=4, - ) - page_token: str = proto.Field( - proto.STRING, - number=5, - ) - - -class ListBackupsResponse(proto.Message): - r"""The response for - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. - - Attributes: - backups (MutableSequence[google.cloud.bigtable_admin_v2.types.Backup]): - The list of matching backups. - next_page_token (str): - ``next_page_token`` can be sent in a subsequent - [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] - call to fetch more of the matching backups. - """ - - @property - def raw_page(self): - return self - - backups: MutableSequence[gba_table.Backup] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message=gba_table.Backup, - ) - next_page_token: str = proto.Field( - proto.STRING, - number=2, - ) - - -class CopyBackupRequest(proto.Message): - r"""The request for - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - - Attributes: - parent (str): - Required. The name of the destination cluster that will - contain the backup copy. The cluster must already exists. - Values are of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}``. - backup_id (str): - Required. The id of the new backup. The ``backup_id`` along - with ``parent`` are combined as {parent}/backups/{backup_id} - to create the full backup name, of the form: - ``projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup_id}``. - This string must be between 1 and 50 characters in length - and match the regex [*a-zA-Z0-9][-*.a-zA-Z0-9]*. - source_backup (str): - Required. The source backup to be copied from. The source - backup needs to be in READY state for it to be copied. - Copying a copied backup is not allowed. 
Once CopyBackup is - in progress, the source backup cannot be deleted or cleaned - up on expiration until CopyBackup is finished. Values are of - the form: - ``projects//instances//clusters//backups/``. - expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. Required. The expiration time of the copied backup - with microsecond granularity that must be at least 6 hours - and at most 30 days from the time the request is received. - Once the ``expire_time`` has passed, Cloud Bigtable will - delete the backup and free the resources used by the backup. - """ - - parent: str = proto.Field( - proto.STRING, - number=1, - ) - backup_id: str = proto.Field( - proto.STRING, - number=2, - ) - source_backup: str = proto.Field( - proto.STRING, - number=3, - ) - expire_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - - -class CopyBackupMetadata(proto.Message): - r"""Metadata type for the google.longrunning.Operation returned by - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup]. - - Attributes: - name (str): - The name of the backup being created through the copy - operation. Values are of the form - ``projects//instances//clusters//backups/``. - source_backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): - Information about the source backup that is - being copied from. - progress (google.cloud.bigtable_admin_v2.types.OperationProgress): - The progress of the - [CopyBackup][google.bigtable.admin.v2.BigtableTableAdmin.CopyBackup] - operation. 
- """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_backup_info: gba_table.BackupInfo = proto.Field( - proto.MESSAGE, - number=2, - message=gba_table.BackupInfo, - ) - progress: common.OperationProgress = proto.Field( - proto.MESSAGE, - number=3, - message=common.OperationProgress, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/common.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/common.py deleted file mode 100644 index 76e9cd894..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/common.py +++ /dev/null @@ -1,81 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.admin.v2', - manifest={ - 'StorageType', - 'OperationProgress', - }, -) - - -class StorageType(proto.Enum): - r"""Storage media types for persisting Bigtable data. - - Values: - STORAGE_TYPE_UNSPECIFIED (0): - The user did not specify a storage type. - SSD (1): - Flash (SSD) storage should be used. - HDD (2): - Magnetic drive (HDD) storage should be used. 
- """ - STORAGE_TYPE_UNSPECIFIED = 0 - SSD = 1 - HDD = 2 - - -class OperationProgress(proto.Message): - r"""Encapsulates progress related information for a Cloud - Bigtable long running operation. - - Attributes: - progress_percent (int): - Percent completion of the operation. - Values are between 0 and 100 inclusive. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Time the request was received. - end_time (google.protobuf.timestamp_pb2.Timestamp): - If set, the time at which this operation - failed or was completed successfully. - """ - - progress_percent: int = proto.Field( - proto.INT32, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/instance.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/instance.py deleted file mode 100644 index 0aeaef0d3..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/instance.py +++ /dev/null @@ -1,620 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.cloud.bigtable_admin_v2.types import common -from google.protobuf import timestamp_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.admin.v2', - manifest={ - 'Instance', - 'AutoscalingTargets', - 'AutoscalingLimits', - 'Cluster', - 'AppProfile', - 'HotTablet', - }, -) - - -class Instance(proto.Message): - r"""A collection of Bigtable [Tables][google.bigtable.admin.v2.Table] - and the resources that serve them. All tables in an instance are - served from all [Clusters][google.bigtable.admin.v2.Cluster] in the - instance. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - The unique name of the instance. Values are of the form - ``projects/{project}/instances/[a-z][a-z0-9\\-]+[a-z0-9]``. - display_name (str): - Required. The descriptive name for this - instance as it appears in UIs. Can be changed at - any time, but should be kept globally unique to - avoid confusion. - state (google.cloud.bigtable_admin_v2.types.Instance.State): - (``OutputOnly``) The current state of the instance. - type_ (google.cloud.bigtable_admin_v2.types.Instance.Type): - The type of the instance. Defaults to ``PRODUCTION``. - labels (MutableMapping[str, str]): - Labels are a flexible and lightweight mechanism for - organizing cloud resources into groups that reflect a - customer's organizational needs and deployment strategies. - They can be used to filter resources and aggregate metrics. - - - Label keys must be between 1 and 63 characters long and - must conform to the regular expression: - ``[\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62}``. - - Label values must be between 0 and 63 characters long and - must conform to the regular expression: - ``[\p{Ll}\p{Lo}\p{N}_-]{0,63}``. 
- - No more than 64 labels can be associated with a given - resource. - - Keys and values must both be under 128 bytes. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. A server-assigned timestamp representing when - this Instance was created. For instances created before this - field was added (August 2021), this value is - ``seconds: 0, nanos: 1``. - satisfies_pzs (bool): - Output only. Reserved for future use. - - This field is a member of `oneof`_ ``_satisfies_pzs``. - """ - class State(proto.Enum): - r"""Possible states of an instance. - - Values: - STATE_NOT_KNOWN (0): - The state of the instance could not be - determined. - READY (1): - The instance has been successfully created - and can serve requests to its tables. - CREATING (2): - The instance is currently being created, and - may be destroyed if the creation process - encounters an error. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - class Type(proto.Enum): - r"""The type of the instance. - - Values: - TYPE_UNSPECIFIED (0): - The type of the instance is unspecified. If set when - creating an instance, a ``PRODUCTION`` instance will be - created. If set when updating an instance, the type will be - left unchanged. - PRODUCTION (1): - An instance meant for production use. ``serve_nodes`` must - be set on the cluster. - DEVELOPMENT (2): - DEPRECATED: Prefer PRODUCTION for all use - cases, as it no longer enforces a higher minimum - node count than DEVELOPMENT. 
- """ - TYPE_UNSPECIFIED = 0 - PRODUCTION = 1 - DEVELOPMENT = 2 - - name: str = proto.Field( - proto.STRING, - number=1, - ) - display_name: str = proto.Field( - proto.STRING, - number=2, - ) - state: State = proto.Field( - proto.ENUM, - number=3, - enum=State, - ) - type_: Type = proto.Field( - proto.ENUM, - number=4, - enum=Type, - ) - labels: MutableMapping[str, str] = proto.MapField( - proto.STRING, - proto.STRING, - number=5, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=7, - message=timestamp_pb2.Timestamp, - ) - satisfies_pzs: bool = proto.Field( - proto.BOOL, - number=8, - optional=True, - ) - - -class AutoscalingTargets(proto.Message): - r"""The Autoscaling targets for a Cluster. These determine the - recommended nodes. - - Attributes: - cpu_utilization_percent (int): - The cpu utilization that the Autoscaler should be trying to - achieve. This number is on a scale from 0 (no utilization) - to 100 (total utilization), and is limited between 10 and - 80, otherwise it will return INVALID_ARGUMENT error. - storage_utilization_gib_per_node (int): - The storage utilization that the Autoscaler should be trying - to achieve. This number is limited between 2560 (2.5TiB) and - 5120 (5TiB) for a SSD cluster and between 8192 (8TiB) and - 16384 (16TiB) for an HDD cluster, otherwise it will return - INVALID_ARGUMENT error. If this value is set to 0, it will - be treated as if it were set to the default value: 2560 for - SSD, 8192 for HDD. - """ - - cpu_utilization_percent: int = proto.Field( - proto.INT32, - number=2, - ) - storage_utilization_gib_per_node: int = proto.Field( - proto.INT32, - number=3, - ) - - -class AutoscalingLimits(proto.Message): - r"""Limits for the number of nodes a Cluster can autoscale - up/down to. - - Attributes: - min_serve_nodes (int): - Required. Minimum number of nodes to scale - down to. - max_serve_nodes (int): - Required. Maximum number of nodes to scale up - to. 
- """ - - min_serve_nodes: int = proto.Field( - proto.INT32, - number=1, - ) - max_serve_nodes: int = proto.Field( - proto.INT32, - number=2, - ) - - -class Cluster(proto.Message): - r"""A resizable group of nodes in a particular cloud location, capable - of serving all [Tables][google.bigtable.admin.v2.Table] in the - parent [Instance][google.bigtable.admin.v2.Instance]. - - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - The unique name of the cluster. Values are of the form - ``projects/{project}/instances/{instance}/clusters/[a-z][-a-z0-9]*``. - location (str): - Immutable. The location where this cluster's nodes and - storage reside. For best performance, clients should be - located as close as possible to this cluster. Currently only - zones are supported, so values should be of the form - ``projects/{project}/locations/{zone}``. - state (google.cloud.bigtable_admin_v2.types.Cluster.State): - Output only. The current state of the - cluster. - serve_nodes (int): - The number of nodes allocated to this - cluster. More nodes enable higher throughput and - more consistent performance. - cluster_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterConfig): - Configuration for this cluster. - - This field is a member of `oneof`_ ``config``. - default_storage_type (google.cloud.bigtable_admin_v2.types.StorageType): - Immutable. The type of storage used by this - cluster to serve its parent instance's tables, - unless explicitly overridden. - encryption_config (google.cloud.bigtable_admin_v2.types.Cluster.EncryptionConfig): - Immutable. The encryption configuration for - CMEK-protected clusters. - """ - class State(proto.Enum): - r"""Possible states of a cluster. - - Values: - STATE_NOT_KNOWN (0): - The state of the cluster could not be - determined. - READY (1): - The cluster has been successfully created and - is ready to serve requests. 
- CREATING (2): - The cluster is currently being created, and - may be destroyed if the creation process - encounters an error. A cluster may not be able - to serve requests while being created. - RESIZING (3): - The cluster is currently being resized, and - may revert to its previous node count if the - process encounters an error. A cluster is still - capable of serving requests while being resized, - but may exhibit performance as if its number of - allocated nodes is between the starting and - requested states. - DISABLED (4): - The cluster has no backing nodes. The data - (tables) still exist, but no operations can be - performed on the cluster. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - RESIZING = 3 - DISABLED = 4 - - class ClusterAutoscalingConfig(proto.Message): - r"""Autoscaling config for a cluster. - - Attributes: - autoscaling_limits (google.cloud.bigtable_admin_v2.types.AutoscalingLimits): - Required. Autoscaling limits for this - cluster. - autoscaling_targets (google.cloud.bigtable_admin_v2.types.AutoscalingTargets): - Required. Autoscaling targets for this - cluster. - """ - - autoscaling_limits: 'AutoscalingLimits' = proto.Field( - proto.MESSAGE, - number=1, - message='AutoscalingLimits', - ) - autoscaling_targets: 'AutoscalingTargets' = proto.Field( - proto.MESSAGE, - number=2, - message='AutoscalingTargets', - ) - - class ClusterConfig(proto.Message): - r"""Configuration for a cluster. - - Attributes: - cluster_autoscaling_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterAutoscalingConfig): - Autoscaling configuration for this cluster. - """ - - cluster_autoscaling_config: 'Cluster.ClusterAutoscalingConfig' = proto.Field( - proto.MESSAGE, - number=1, - message='Cluster.ClusterAutoscalingConfig', - ) - - class EncryptionConfig(proto.Message): - r"""Cloud Key Management Service (Cloud KMS) settings for a - CMEK-protected cluster. 
- - Attributes: - kms_key_name (str): - Describes the Cloud KMS encryption key that will be used to - protect the destination Bigtable cluster. The requirements - for this key are: - - 1) The Cloud Bigtable service account associated with the - project that contains this cluster must be granted the - ``cloudkms.cryptoKeyEncrypterDecrypter`` role on the CMEK - key. - 2) Only regional keys can be used and the region of the CMEK - key must match the region of the cluster. - 3) All clusters within an instance must use the same CMEK - key. Values are of the form - ``projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key}`` - """ - - kms_key_name: str = proto.Field( - proto.STRING, - number=1, - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - location: str = proto.Field( - proto.STRING, - number=2, - ) - state: State = proto.Field( - proto.ENUM, - number=3, - enum=State, - ) - serve_nodes: int = proto.Field( - proto.INT32, - number=4, - ) - cluster_config: ClusterConfig = proto.Field( - proto.MESSAGE, - number=7, - oneof='config', - message=ClusterConfig, - ) - default_storage_type: common.StorageType = proto.Field( - proto.ENUM, - number=5, - enum=common.StorageType, - ) - encryption_config: EncryptionConfig = proto.Field( - proto.MESSAGE, - number=6, - message=EncryptionConfig, - ) - - -class AppProfile(proto.Message): - r"""A configuration object describing how Cloud Bigtable should - treat traffic from a particular end user application. - - This message has `oneof`_ fields (mutually exclusive fields). - For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - name (str): - The unique name of the app profile. Values are of the form - ``projects/{project}/instances/{instance}/appProfiles/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. 
- etag (str): - Strongly validated etag for optimistic concurrency control. - Preserve the value returned from ``GetAppProfile`` when - calling ``UpdateAppProfile`` to fail the request if there - has been a modification in the mean time. The - ``update_mask`` of the request need not include ``etag`` for - this protection to apply. See - `Wikipedia `__ and - `RFC - 7232 `__ - for more details. - description (str): - Long form description of the use case for - this AppProfile. - multi_cluster_routing_use_any (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny): - Use a multi-cluster routing policy. - - This field is a member of `oneof`_ ``routing_policy``. - single_cluster_routing (google.cloud.bigtable_admin_v2.types.AppProfile.SingleClusterRouting): - Use a single-cluster routing policy. - - This field is a member of `oneof`_ ``routing_policy``. - priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): - This field has been deprecated in favor of - ``standard_isolation.priority``. If you set this field, - ``standard_isolation.priority`` will be set instead. - - The priority of requests sent using this app profile. - - This field is a member of `oneof`_ ``isolation``. - standard_isolation (google.cloud.bigtable_admin_v2.types.AppProfile.StandardIsolation): - The standard options used for isolating this - app profile's traffic from other use cases. - - This field is a member of `oneof`_ ``isolation``. - """ - class Priority(proto.Enum): - r"""Possible priorities for an app profile. Note that higher - priority writes can sometimes queue behind lower priority writes - to the same tablet, as writes must be strictly sequenced in the - durability log. - - Values: - PRIORITY_UNSPECIFIED (0): - Default value. Mapped to PRIORITY_HIGH (the legacy behavior) - on creation. - PRIORITY_LOW (1): - No description available. - PRIORITY_MEDIUM (2): - No description available. - PRIORITY_HIGH (3): - No description available. 
- """ - PRIORITY_UNSPECIFIED = 0 - PRIORITY_LOW = 1 - PRIORITY_MEDIUM = 2 - PRIORITY_HIGH = 3 - - class MultiClusterRoutingUseAny(proto.Message): - r"""Read/write requests are routed to the nearest cluster in the - instance, and will fail over to the nearest cluster that is - available in the event of transient errors or delays. Clusters - in a region are considered equidistant. Choosing this option - sacrifices read-your-writes consistency to improve availability. - - Attributes: - cluster_ids (MutableSequence[str]): - The set of clusters to route to. The order is - ignored; clusters will be tried in order of - distance. If left empty, all clusters are - eligible. - """ - - cluster_ids: MutableSequence[str] = proto.RepeatedField( - proto.STRING, - number=1, - ) - - class SingleClusterRouting(proto.Message): - r"""Unconditionally routes all read/write requests to a specific - cluster. This option preserves read-your-writes consistency but - does not improve availability. - - Attributes: - cluster_id (str): - The cluster to which read/write requests - should be routed. - allow_transactional_writes (bool): - Whether or not ``CheckAndMutateRow`` and - ``ReadModifyWriteRow`` requests are allowed by this app - profile. It is unsafe to send these requests to the same - table/row/column in multiple clusters. - """ - - cluster_id: str = proto.Field( - proto.STRING, - number=1, - ) - allow_transactional_writes: bool = proto.Field( - proto.BOOL, - number=2, - ) - - class StandardIsolation(proto.Message): - r"""Standard options for isolating this app profile's traffic - from other use cases. - - Attributes: - priority (google.cloud.bigtable_admin_v2.types.AppProfile.Priority): - The priority of requests sent using this app - profile. 
- """ - - priority: 'AppProfile.Priority' = proto.Field( - proto.ENUM, - number=1, - enum='AppProfile.Priority', - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - etag: str = proto.Field( - proto.STRING, - number=2, - ) - description: str = proto.Field( - proto.STRING, - number=3, - ) - multi_cluster_routing_use_any: MultiClusterRoutingUseAny = proto.Field( - proto.MESSAGE, - number=5, - oneof='routing_policy', - message=MultiClusterRoutingUseAny, - ) - single_cluster_routing: SingleClusterRouting = proto.Field( - proto.MESSAGE, - number=6, - oneof='routing_policy', - message=SingleClusterRouting, - ) - priority: Priority = proto.Field( - proto.ENUM, - number=7, - oneof='isolation', - enum=Priority, - ) - standard_isolation: StandardIsolation = proto.Field( - proto.MESSAGE, - number=11, - oneof='isolation', - message=StandardIsolation, - ) - - -class HotTablet(proto.Message): - r"""A tablet is a defined by a start and end key and is explained - in https://cloud.google.com/bigtable/docs/overview#architecture - and - https://cloud.google.com/bigtable/docs/performance#optimization. - A Hot tablet is a tablet that exhibits high average cpu usage - during the time interval from start time to end time. - - Attributes: - name (str): - The unique name of the hot tablet. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/[a-zA-Z0-9_-]*``. - table_name (str): - Name of the table that contains the tablet. Values are of - the form - ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The start time of the hot - tablet. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The end time of the hot tablet. - start_key (str): - Tablet Start Key (inclusive). - end_key (str): - Tablet End Key (inclusive). - node_cpu_usage_percent (float): - Output only. 
The average CPU usage spent by a node on this - tablet over the start_time to end_time time range. The - percentage is the amount of CPU used by the node to serve - the tablet, from 0% (tablet was not interacted with) to 100% - (the node spent all cycles serving the hot tablet). - """ - - name: str = proto.Field( - proto.STRING, - number=1, - ) - table_name: str = proto.Field( - proto.STRING, - number=2, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - start_key: str = proto.Field( - proto.STRING, - number=5, - ) - end_key: str = proto.Field( - proto.STRING, - number=6, - ) - node_cpu_usage_percent: float = proto.Field( - proto.FLOAT, - number=7, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/table.py b/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/table.py deleted file mode 100644 index b618f1a91..000000000 --- a/owl-bot-staging/bigtable_admin/v2/google/cloud/bigtable_admin_v2/types/table.py +++ /dev/null @@ -1,727 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -from __future__ import annotations - -from typing import MutableMapping, MutableSequence - -import proto # type: ignore - -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore - - -__protobuf__ = proto.module( - package='google.bigtable.admin.v2', - manifest={ - 'RestoreSourceType', - 'RestoreInfo', - 'ChangeStreamConfig', - 'Table', - 'ColumnFamily', - 'GcRule', - 'EncryptionInfo', - 'Snapshot', - 'Backup', - 'BackupInfo', - }, -) - - -class RestoreSourceType(proto.Enum): - r"""Indicates the type of the restore source. - - Values: - RESTORE_SOURCE_TYPE_UNSPECIFIED (0): - No restore associated. - BACKUP (1): - A backup was used as the source of the - restore. - """ - RESTORE_SOURCE_TYPE_UNSPECIFIED = 0 - BACKUP = 1 - - -class RestoreInfo(proto.Message): - r"""Information about a table restore. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - source_type (google.cloud.bigtable_admin_v2.types.RestoreSourceType): - The type of the restore source. - backup_info (google.cloud.bigtable_admin_v2.types.BackupInfo): - Information about the backup used to restore - the table. The backup may no longer exist. - - This field is a member of `oneof`_ ``source_info``. - """ - - source_type: 'RestoreSourceType' = proto.Field( - proto.ENUM, - number=1, - enum='RestoreSourceType', - ) - backup_info: 'BackupInfo' = proto.Field( - proto.MESSAGE, - number=2, - oneof='source_info', - message='BackupInfo', - ) - - -class ChangeStreamConfig(proto.Message): - r"""Change stream configuration. - - Attributes: - retention_period (google.protobuf.duration_pb2.Duration): - How long the change stream should be - retained. Change stream data older than the - retention period will not be returned when - reading the change stream from the table. 
- Values must be at least 1 day and at most 7 - days, and will be truncated to microsecond - granularity. - """ - - retention_period: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=1, - message=duration_pb2.Duration, - ) - - -class Table(proto.Message): - r"""A collection of user data indexed by row, column, and - timestamp. Each table is served using the resources of its - parent cluster. - - Attributes: - name (str): - The unique name of the table. Values are of the form - ``projects/{project}/instances/{instance}/tables/[_a-zA-Z0-9][-_.a-zA-Z0-9]*``. - Views: ``NAME_ONLY``, ``SCHEMA_VIEW``, ``REPLICATION_VIEW``, - ``FULL`` - cluster_states (MutableMapping[str, google.cloud.bigtable_admin_v2.types.Table.ClusterState]): - Output only. Map from cluster ID to per-cluster table state. - If it could not be determined whether or not the table has - data in a particular cluster (for example, if its zone is - unavailable), then there will be an entry for the cluster - with UNKNOWN ``replication_status``. Views: - ``REPLICATION_VIEW``, ``ENCRYPTION_VIEW``, ``FULL`` - column_families (MutableMapping[str, google.cloud.bigtable_admin_v2.types.ColumnFamily]): - The column families configured for this table, mapped by - column family ID. Views: ``SCHEMA_VIEW``, ``STATS_VIEW``, - ``FULL`` - granularity (google.cloud.bigtable_admin_v2.types.Table.TimestampGranularity): - Immutable. The granularity (i.e. ``MILLIS``) at which - timestamps are stored in this table. Timestamps not matching - the granularity will be rejected. If unspecified at creation - time, the value will be set to ``MILLIS``. Views: - ``SCHEMA_VIEW``, ``FULL``. - restore_info (google.cloud.bigtable_admin_v2.types.RestoreInfo): - Output only. If this table was restored from - another data source (e.g. a backup), this field - will be populated with information about the - restore. 
- change_stream_config (google.cloud.bigtable_admin_v2.types.ChangeStreamConfig): - If specified, enable the change stream on - this table. Otherwise, the change stream is - disabled and the change stream is not retained. - deletion_protection (bool): - Set to true to make the table protected against data loss. - i.e. deleting the following resources through Admin APIs are - prohibited: - - - The table. - - The column families in the table. - - The instance containing the table. - - Note one can still delete the data stored in the table - through Data APIs. - """ - class TimestampGranularity(proto.Enum): - r"""Possible timestamp granularities to use when keeping multiple - versions of data in a table. - - Values: - TIMESTAMP_GRANULARITY_UNSPECIFIED (0): - The user did not specify a granularity. - Should not be returned. When specified during - table creation, MILLIS will be used. - MILLIS (1): - The table keeps data versioned at a - granularity of 1ms. - """ - TIMESTAMP_GRANULARITY_UNSPECIFIED = 0 - MILLIS = 1 - - class View(proto.Enum): - r"""Defines a view over a table's fields. - - Values: - VIEW_UNSPECIFIED (0): - Uses the default view for each method as - documented in its request. - NAME_ONLY (1): - Only populates ``name``. - SCHEMA_VIEW (2): - Only populates ``name`` and fields related to the table's - schema. - REPLICATION_VIEW (3): - Only populates ``name`` and fields related to the table's - replication state. - ENCRYPTION_VIEW (5): - Only populates ``name`` and fields related to the table's - encryption state. - FULL (4): - Populates all fields. - """ - VIEW_UNSPECIFIED = 0 - NAME_ONLY = 1 - SCHEMA_VIEW = 2 - REPLICATION_VIEW = 3 - ENCRYPTION_VIEW = 5 - FULL = 4 - - class ClusterState(proto.Message): - r"""The state of a table's data in a particular cluster. - - Attributes: - replication_state (google.cloud.bigtable_admin_v2.types.Table.ClusterState.ReplicationState): - Output only. The state of replication for the - table in this cluster. 
- encryption_info (MutableSequence[google.cloud.bigtable_admin_v2.types.EncryptionInfo]): - Output only. The encryption information for - the table in this cluster. If the encryption key - protecting this resource is customer managed, - then its version can be rotated in Cloud Key - Management Service (Cloud KMS). The primary - version of the key and its status will be - reflected here when changes propagate from Cloud - KMS. - """ - class ReplicationState(proto.Enum): - r"""Table replication states. - - Values: - STATE_NOT_KNOWN (0): - The replication state of the table is unknown - in this cluster. - INITIALIZING (1): - The cluster was recently created, and the - table must finish copying over pre-existing data - from other clusters before it can begin - receiving live replication updates and serving - Data API requests. - PLANNED_MAINTENANCE (2): - The table is temporarily unable to serve Data - API requests from this cluster due to planned - internal maintenance. - UNPLANNED_MAINTENANCE (3): - The table is temporarily unable to serve Data - API requests from this cluster due to unplanned - or emergency maintenance. - READY (4): - The table can serve Data API requests from - this cluster. Depending on replication delay, - reads may not immediately reflect the state of - the table in other clusters. - READY_OPTIMIZING (5): - The table is fully created and ready for use after a - restore, and is being optimized for performance. When - optimizations are complete, the table will transition to - ``READY`` state. 
- """ - STATE_NOT_KNOWN = 0 - INITIALIZING = 1 - PLANNED_MAINTENANCE = 2 - UNPLANNED_MAINTENANCE = 3 - READY = 4 - READY_OPTIMIZING = 5 - - replication_state: 'Table.ClusterState.ReplicationState' = proto.Field( - proto.ENUM, - number=1, - enum='Table.ClusterState.ReplicationState', - ) - encryption_info: MutableSequence['EncryptionInfo'] = proto.RepeatedField( - proto.MESSAGE, - number=2, - message='EncryptionInfo', - ) - - name: str = proto.Field( - proto.STRING, - number=1, - ) - cluster_states: MutableMapping[str, ClusterState] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=2, - message=ClusterState, - ) - column_families: MutableMapping[str, 'ColumnFamily'] = proto.MapField( - proto.STRING, - proto.MESSAGE, - number=3, - message='ColumnFamily', - ) - granularity: TimestampGranularity = proto.Field( - proto.ENUM, - number=4, - enum=TimestampGranularity, - ) - restore_info: 'RestoreInfo' = proto.Field( - proto.MESSAGE, - number=6, - message='RestoreInfo', - ) - change_stream_config: 'ChangeStreamConfig' = proto.Field( - proto.MESSAGE, - number=8, - message='ChangeStreamConfig', - ) - deletion_protection: bool = proto.Field( - proto.BOOL, - number=9, - ) - - -class ColumnFamily(proto.Message): - r"""A set of columns within a table which share a common - configuration. - - Attributes: - gc_rule (google.cloud.bigtable_admin_v2.types.GcRule): - Garbage collection rule specified as a - protobuf. Must serialize to at most 500 bytes. - - NOTE: Garbage collection executes - opportunistically in the background, and so it's - possible for reads to return a cell even if it - matches the active GC expression for its family. - """ - - gc_rule: 'GcRule' = proto.Field( - proto.MESSAGE, - number=1, - message='GcRule', - ) - - -class GcRule(proto.Message): - r"""Rule for determining which cells to delete during garbage - collection. - - This message has `oneof`_ fields (mutually exclusive fields). 
- For each oneof, at most one member field can be set at the same time. - Setting any member of the oneof automatically clears all other - members. - - .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields - - Attributes: - max_num_versions (int): - Delete all cells in a column except the most - recent N. - - This field is a member of `oneof`_ ``rule``. - max_age (google.protobuf.duration_pb2.Duration): - Delete cells in a column older than the given - age. Values must be at least one millisecond, - and will be truncated to microsecond - granularity. - - This field is a member of `oneof`_ ``rule``. - intersection (google.cloud.bigtable_admin_v2.types.GcRule.Intersection): - Delete cells that would be deleted by every - nested rule. - - This field is a member of `oneof`_ ``rule``. - union (google.cloud.bigtable_admin_v2.types.GcRule.Union): - Delete cells that would be deleted by any - nested rule. - - This field is a member of `oneof`_ ``rule``. - """ - - class Intersection(proto.Message): - r"""A GcRule which deletes cells matching all of the given rules. - - Attributes: - rules (MutableSequence[google.cloud.bigtable_admin_v2.types.GcRule]): - Only delete cells which would be deleted by every element of - ``rules``. - """ - - rules: MutableSequence['GcRule'] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='GcRule', - ) - - class Union(proto.Message): - r"""A GcRule which deletes cells matching any of the given rules. - - Attributes: - rules (MutableSequence[google.cloud.bigtable_admin_v2.types.GcRule]): - Delete cells which would be deleted by any element of - ``rules``. 
- """ - - rules: MutableSequence['GcRule'] = proto.RepeatedField( - proto.MESSAGE, - number=1, - message='GcRule', - ) - - max_num_versions: int = proto.Field( - proto.INT32, - number=1, - oneof='rule', - ) - max_age: duration_pb2.Duration = proto.Field( - proto.MESSAGE, - number=2, - oneof='rule', - message=duration_pb2.Duration, - ) - intersection: Intersection = proto.Field( - proto.MESSAGE, - number=3, - oneof='rule', - message=Intersection, - ) - union: Union = proto.Field( - proto.MESSAGE, - number=4, - oneof='rule', - message=Union, - ) - - -class EncryptionInfo(proto.Message): - r"""Encryption information for a given resource. - If this resource is protected with customer managed encryption, - the in-use Cloud Key Management Service (Cloud KMS) key version - is specified along with its status. - - Attributes: - encryption_type (google.cloud.bigtable_admin_v2.types.EncryptionInfo.EncryptionType): - Output only. The type of encryption used to - protect this resource. - encryption_status (google.rpc.status_pb2.Status): - Output only. The status of encrypt/decrypt - calls on underlying data for this resource. - Regardless of status, the existing data is - always encrypted at rest. - kms_key_version (str): - Output only. The version of the Cloud KMS key - specified in the parent cluster that is in use - for the data underlying this table. - """ - class EncryptionType(proto.Enum): - r"""Possible encryption types for a resource. - - Values: - ENCRYPTION_TYPE_UNSPECIFIED (0): - Encryption type was not specified, though - data at rest remains encrypted. - GOOGLE_DEFAULT_ENCRYPTION (1): - The data backing this resource is encrypted - at rest with a key that is fully managed by - Google. No key version or status will be - populated. This is the default state. - CUSTOMER_MANAGED_ENCRYPTION (2): - The data backing this resource is encrypted at rest with a - key that is managed by the customer. 
The in-use version of - the key and its status are populated for CMEK-protected - tables. CMEK-protected backups are pinned to the key version - that was in use at the time the backup was taken. This key - version is populated but its status is not tracked and is - reported as ``UNKNOWN``. - """ - ENCRYPTION_TYPE_UNSPECIFIED = 0 - GOOGLE_DEFAULT_ENCRYPTION = 1 - CUSTOMER_MANAGED_ENCRYPTION = 2 - - encryption_type: EncryptionType = proto.Field( - proto.ENUM, - number=3, - enum=EncryptionType, - ) - encryption_status: status_pb2.Status = proto.Field( - proto.MESSAGE, - number=4, - message=status_pb2.Status, - ) - kms_key_version: str = proto.Field( - proto.STRING, - number=2, - ) - - -class Snapshot(proto.Message): - r"""A snapshot of a table at a particular time. A snapshot can be - used as a checkpoint for data restoration or a data source for a - new table. - - Note: This is a private alpha release of Cloud Bigtable - snapshots. This feature is not currently available to most Cloud - Bigtable customers. This feature might be changed in - backward-incompatible ways and is not recommended for production - use. It is not subject to any SLA or deprecation policy. - - Attributes: - name (str): - The unique name of the snapshot. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}``. - source_table (google.cloud.bigtable_admin_v2.types.Table): - Output only. The source table at the time the - snapshot was taken. - data_size_bytes (int): - Output only. The size of the data in the - source table at the time the snapshot was taken. - In some cases, this value may be computed - asynchronously via a background process and a - placeholder of 0 will be used in the meantime. - create_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time when the snapshot is - created. - delete_time (google.protobuf.timestamp_pb2.Timestamp): - The time when the snapshot will be deleted. 
- The maximum amount of time a snapshot can stay - active is 365 days. If 'ttl' is not specified, - the default maximum of 365 days will be used. - state (google.cloud.bigtable_admin_v2.types.Snapshot.State): - Output only. The current state of the - snapshot. - description (str): - Description of the snapshot. - """ - class State(proto.Enum): - r"""Possible states of a snapshot. - - Values: - STATE_NOT_KNOWN (0): - The state of the snapshot could not be - determined. - READY (1): - The snapshot has been successfully created - and can serve all requests. - CREATING (2): - The snapshot is currently being created, and - may be destroyed if the creation process - encounters an error. A snapshot may not be - restored to a table while it is being created. - """ - STATE_NOT_KNOWN = 0 - READY = 1 - CREATING = 2 - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_table: 'Table' = proto.Field( - proto.MESSAGE, - number=2, - message='Table', - ) - data_size_bytes: int = proto.Field( - proto.INT64, - number=3, - ) - create_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - delete_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - state: State = proto.Field( - proto.ENUM, - number=6, - enum=State, - ) - description: str = proto.Field( - proto.STRING, - number=7, - ) - - -class Backup(proto.Message): - r"""A backup of a Cloud Bigtable table. - - Attributes: - name (str): - A globally unique identifier for the backup which cannot be - changed. Values are of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}/ backups/[_a-zA-Z0-9][-_.a-zA-Z0-9]*`` - The final segment of the name must be between 1 and 50 - characters in length. - - The backup is stored in the cluster identified by the prefix - of the backup name of the form - ``projects/{project}/instances/{instance}/clusters/{cluster}``. 
- source_table (str): - Required. Immutable. Name of the table from which this - backup was created. This needs to be in the same instance as - the backup. Values are of the form - ``projects/{project}/instances/{instance}/tables/{source_table}``. - source_backup (str): - Output only. Name of the backup from which - this backup was copied. If a backup is not - created by copying a backup, this field will be - empty. Values are of the form: - projects//instances//backups/. - expire_time (google.protobuf.timestamp_pb2.Timestamp): - Required. The expiration time of the backup, with - microseconds granularity that must be at least 6 hours and - at most 90 days from the time the request is received. Once - the ``expire_time`` has passed, Cloud Bigtable will delete - the backup and free the resources used by the backup. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. ``start_time`` is the time that the backup was - started (i.e. approximately the time the - [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup] - request is received). The row data in this backup will be no - older than this timestamp. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. ``end_time`` is the time that the backup was - finished. The row data in the backup will be no newer than - this timestamp. - size_bytes (int): - Output only. Size of the backup in bytes. - state (google.cloud.bigtable_admin_v2.types.Backup.State): - Output only. The current state of the backup. - encryption_info (google.cloud.bigtable_admin_v2.types.EncryptionInfo): - Output only. The encryption information for - the backup. - """ - class State(proto.Enum): - r"""Indicates the current state of the backup. - - Values: - STATE_UNSPECIFIED (0): - Not specified. - CREATING (1): - The pending backup is still being created. Operations on the - backup may fail with ``FAILED_PRECONDITION`` in this state. - READY (2): - The backup is complete and ready for use. 
- """ - STATE_UNSPECIFIED = 0 - CREATING = 1 - READY = 2 - - name: str = proto.Field( - proto.STRING, - number=1, - ) - source_table: str = proto.Field( - proto.STRING, - number=2, - ) - source_backup: str = proto.Field( - proto.STRING, - number=10, - ) - expire_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=4, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=5, - message=timestamp_pb2.Timestamp, - ) - size_bytes: int = proto.Field( - proto.INT64, - number=6, - ) - state: State = proto.Field( - proto.ENUM, - number=7, - enum=State, - ) - encryption_info: 'EncryptionInfo' = proto.Field( - proto.MESSAGE, - number=9, - message='EncryptionInfo', - ) - - -class BackupInfo(proto.Message): - r"""Information about a backup. - - Attributes: - backup (str): - Output only. Name of the backup. - start_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. The time that the backup was - started. Row data in the backup will be no older - than this timestamp. - end_time (google.protobuf.timestamp_pb2.Timestamp): - Output only. This time that the backup was - finished. Row data in the backup will be no - newer than this timestamp. - source_table (str): - Output only. Name of the table the backup was - created from. - source_backup (str): - Output only. Name of the backup from which - this backup was copied. If a backup is not - created by copying a backup, this field will be - empty. Values are of the form: - projects//instances//backups/. 
- """ - - backup: str = proto.Field( - proto.STRING, - number=1, - ) - start_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=2, - message=timestamp_pb2.Timestamp, - ) - end_time: timestamp_pb2.Timestamp = proto.Field( - proto.MESSAGE, - number=3, - message=timestamp_pb2.Timestamp, - ) - source_table: str = proto.Field( - proto.STRING, - number=4, - ) - source_backup: str = proto.Field( - proto.STRING, - number=10, - ) - - -__all__ = tuple(sorted(__protobuf__.manifest)) diff --git a/owl-bot-staging/bigtable_admin/v2/mypy.ini b/owl-bot-staging/bigtable_admin/v2/mypy.ini deleted file mode 100644 index 574c5aed3..000000000 --- a/owl-bot-staging/bigtable_admin/v2/mypy.ini +++ /dev/null @@ -1,3 +0,0 @@ -[mypy] -python_version = 3.7 -namespace_packages = True diff --git a/owl-bot-staging/bigtable_admin/v2/noxfile.py b/owl-bot-staging/bigtable_admin/v2/noxfile.py deleted file mode 100644 index 7625f764b..000000000 --- a/owl-bot-staging/bigtable_admin/v2/noxfile.py +++ /dev/null @@ -1,177 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -import pathlib -import shutil -import subprocess -import sys - - -import nox # type: ignore - -ALL_PYTHON = [ - "3.7", - "3.8", - "3.9", - "3.10", - "3.11", - "3.12" -] - -CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute() - -LOWER_BOUND_CONSTRAINTS_FILE = CURRENT_DIRECTORY / "constraints.txt" -PACKAGE_NAME = 'google-cloud-bigtable-admin' - -BLACK_VERSION = "black==22.3.0" -BLACK_PATHS = ["docs", "google", "tests", "samples", "noxfile.py", "setup.py"] -DEFAULT_PYTHON_VERSION = "3.12" - -nox.sessions = [ - "unit", - "cover", - "mypy", - "check_lower_bounds" - # exclude update_lower_bounds from default - "docs", - "blacken", - "lint", -] - -@nox.session(python=ALL_PYTHON) -def unit(session): - """Run the unit test suite.""" - - session.install('coverage', 'pytest', 'pytest-cov', 'pytest-asyncio', 'asyncmock; python_version < "3.8"') - session.install('-e', '.') - - session.run( - 'py.test', - '--quiet', - '--cov=google/cloud/bigtable_admin_v2/', - '--cov=tests/', - '--cov-config=.coveragerc', - '--cov-report=term', - '--cov-report=html', - os.path.join('tests', 'unit', ''.join(session.posargs)) - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def cover(session): - """Run the final coverage report. - This outputs the coverage report aggregating coverage from the unit - test runs (not system test runs), and then erases coverage data. 
- """ - session.install("coverage", "pytest-cov") - session.run("coverage", "report", "--show-missing", "--fail-under=100") - - session.run("coverage", "erase") - - -@nox.session(python=ALL_PYTHON) -def mypy(session): - """Run the type checker.""" - session.install( - 'mypy', - 'types-requests', - 'types-protobuf' - ) - session.install('.') - session.run( - 'mypy', - '-p', - 'google', - ) - - -@nox.session -def update_lower_bounds(session): - """Update lower bounds in constraints.txt to match setup.py""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'update', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - - -@nox.session -def check_lower_bounds(session): - """Check lower bounds in setup.py are reflected in constraints file""" - session.install('google-cloud-testutils') - session.install('.') - - session.run( - 'lower-bound-checker', - 'check', - '--package-name', - PACKAGE_NAME, - '--constraints-file', - str(LOWER_BOUND_CONSTRAINTS_FILE), - ) - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def docs(session): - """Build the docs for this library.""" - - session.install("-e", ".") - session.install("sphinx==7.0.1", "alabaster", "recommonmark") - - shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) - session.run( - "sphinx-build", - "-W", # warnings as errors - "-T", # show full traceback on exception - "-N", # no colors - "-b", - "html", - "-d", - os.path.join("docs", "_build", "doctrees", ""), - os.path.join("docs", ""), - os.path.join("docs", "_build", "html", ""), - ) - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def lint(session): - """Run linters. - - Returns a failure if the linters find linting errors or sufficiently - serious code quality issues. 
- """ - session.install("flake8", BLACK_VERSION) - session.run( - "black", - "--check", - *BLACK_PATHS, - ) - session.run("flake8", "google", "tests", "samples") - - -@nox.session(python=DEFAULT_PYTHON_VERSION) -def blacken(session): - """Run black. Format code to uniform standard.""" - session.install(BLACK_VERSION) - session.run( - "black", - *BLACK_PATHS, - ) diff --git a/owl-bot-staging/bigtable_admin/v2/scripts/fixup_bigtable_admin_v2_keywords.py b/owl-bot-staging/bigtable_admin/v2/scripts/fixup_bigtable_admin_v2_keywords.py deleted file mode 100644 index 8c3efea10..000000000 --- a/owl-bot-staging/bigtable_admin/v2/scripts/fixup_bigtable_admin_v2_keywords.py +++ /dev/null @@ -1,218 +0,0 @@ -#! /usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import argparse -import os -import libcst as cst -import pathlib -import sys -from typing import (Any, Callable, Dict, List, Sequence, Tuple) - - -def partition( - predicate: Callable[[Any], bool], - iterator: Sequence[Any] -) -> Tuple[List[Any], List[Any]]: - """A stable, out-of-place partition.""" - results = ([], []) - - for i in iterator: - results[int(predicate(i))].append(i) - - # Returns trueList, falseList - return results[1], results[0] - - -class bigtable_adminCallTransformer(cst.CSTTransformer): - CTRL_PARAMS: Tuple[str] = ('retry', 'timeout', 'metadata') - METHOD_TO_PARAMS: Dict[str, Tuple[str]] = { - 'check_consistency': ('name', 'consistency_token', ), - 'copy_backup': ('parent', 'backup_id', 'source_backup', 'expire_time', ), - 'create_app_profile': ('parent', 'app_profile_id', 'app_profile', 'ignore_warnings', ), - 'create_backup': ('parent', 'backup_id', 'backup', ), - 'create_cluster': ('parent', 'cluster_id', 'cluster', ), - 'create_instance': ('parent', 'instance_id', 'instance', 'clusters', ), - 'create_table': ('parent', 'table_id', 'table', 'initial_splits', ), - 'create_table_from_snapshot': ('parent', 'table_id', 'source_snapshot', ), - 'delete_app_profile': ('name', 'ignore_warnings', ), - 'delete_backup': ('name', ), - 'delete_cluster': ('name', ), - 'delete_instance': ('name', ), - 'delete_snapshot': ('name', ), - 'delete_table': ('name', ), - 'drop_row_range': ('name', 'row_key_prefix', 'delete_all_data_from_table', ), - 'generate_consistency_token': ('name', ), - 'get_app_profile': ('name', ), - 'get_backup': ('name', ), - 'get_cluster': ('name', ), - 'get_iam_policy': ('resource', 'options', ), - 'get_instance': ('name', ), - 'get_snapshot': ('name', ), - 'get_table': ('name', 'view', ), - 'list_app_profiles': ('parent', 'page_size', 'page_token', ), - 'list_backups': ('parent', 'filter', 'order_by', 'page_size', 'page_token', ), - 'list_clusters': ('parent', 'page_token', ), - 'list_hot_tablets': ('parent', 'start_time', 
'end_time', 'page_size', 'page_token', ), - 'list_instances': ('parent', 'page_token', ), - 'list_snapshots': ('parent', 'page_size', 'page_token', ), - 'list_tables': ('parent', 'view', 'page_size', 'page_token', ), - 'modify_column_families': ('name', 'modifications', 'ignore_warnings', ), - 'partial_update_cluster': ('cluster', 'update_mask', ), - 'partial_update_instance': ('instance', 'update_mask', ), - 'restore_table': ('parent', 'table_id', 'backup', ), - 'set_iam_policy': ('resource', 'policy', 'update_mask', ), - 'snapshot_table': ('name', 'cluster', 'snapshot_id', 'ttl', 'description', ), - 'test_iam_permissions': ('resource', 'permissions', ), - 'undelete_table': ('name', ), - 'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ), - 'update_backup': ('backup', 'update_mask', ), - 'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ), - 'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', ), - 'update_table': ('table', 'update_mask', ), - } - - def leave_Call(self, original: cst.Call, updated: cst.Call) -> cst.CSTNode: - try: - key = original.func.attr.value - kword_params = self.METHOD_TO_PARAMS[key] - except (AttributeError, KeyError): - # Either not a method from the API or too convoluted to be sure. - return updated - - # If the existing code is valid, keyword args come after positional args. - # Therefore, all positional args must map to the first parameters. - args, kwargs = partition(lambda a: not bool(a.keyword), updated.args) - if any(k.keyword.value == "request" for k in kwargs): - # We've already fixed this file, don't fix it again. 
- return updated - - kwargs, ctrl_kwargs = partition( - lambda a: a.keyword.value not in self.CTRL_PARAMS, - kwargs - ) - - args, ctrl_args = args[:len(kword_params)], args[len(kword_params):] - ctrl_kwargs.extend(cst.Arg(value=a.value, keyword=cst.Name(value=ctrl)) - for a, ctrl in zip(ctrl_args, self.CTRL_PARAMS)) - - request_arg = cst.Arg( - value=cst.Dict([ - cst.DictElement( - cst.SimpleString("'{}'".format(name)), -cst.Element(value=arg.value) - ) - # Note: the args + kwargs looks silly, but keep in mind that - # the control parameters had to be stripped out, and that - # those could have been passed positionally or by keyword. - for name, arg in zip(kword_params, args + kwargs)]), - keyword=cst.Name("request") - ) - - return updated.with_changes( - args=[request_arg] + ctrl_kwargs - ) - - -def fix_files( - in_dir: pathlib.Path, - out_dir: pathlib.Path, - *, - transformer=bigtable_adminCallTransformer(), -): - """Duplicate the input dir to the output dir, fixing file method calls. - - Preconditions: - * in_dir is a real directory - * out_dir is a real, empty directory - """ - pyfile_gen = ( - pathlib.Path(os.path.join(root, f)) - for root, _, files in os.walk(in_dir) - for f in files if os.path.splitext(f)[1] == ".py" - ) - - for fpath in pyfile_gen: - with open(fpath, 'r') as f: - src = f.read() - - # Parse the code and insert method call fixes. - tree = cst.parse_module(src) - updated = tree.visit(transformer) - - # Create the path and directory structure for the new file. - updated_path = out_dir.joinpath(fpath.relative_to(in_dir)) - updated_path.parent.mkdir(parents=True, exist_ok=True) - - # Generate the updated source file at the corresponding path. - with open(updated_path, 'w') as f: - f.write(updated.code) - - -if __name__ == '__main__': - parser = argparse.ArgumentParser( - description="""Fix up source that uses the bigtable_admin client library. - -The existing sources are NOT overwritten but are copied to output_dir with changes made. 
- -Note: This tool operates at a best-effort level at converting positional - parameters in client method calls to keyword based parameters. - Cases where it WILL FAIL include - A) * or ** expansion in a method call. - B) Calls via function or method alias (includes free function calls) - C) Indirect or dispatched calls (e.g. the method is looked up dynamically) - - These all constitute false negatives. The tool will also detect false - positives when an API method shares a name with another method. -""") - parser.add_argument( - '-d', - '--input-directory', - required=True, - dest='input_dir', - help='the input directory to walk for python files to fix up', - ) - parser.add_argument( - '-o', - '--output-directory', - required=True, - dest='output_dir', - help='the directory to output files fixed via un-flattening', - ) - args = parser.parse_args() - input_dir = pathlib.Path(args.input_dir) - output_dir = pathlib.Path(args.output_dir) - if not input_dir.is_dir(): - print( - f"input directory '{input_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if not output_dir.is_dir(): - print( - f"output directory '{output_dir}' does not exist or is not a directory", - file=sys.stderr, - ) - sys.exit(-1) - - if os.listdir(output_dir): - print( - f"output directory '{output_dir}' is not empty", - file=sys.stderr, - ) - sys.exit(-1) - - fix_files(input_dir, output_dir) diff --git a/owl-bot-staging/bigtable_admin/v2/setup.py b/owl-bot-staging/bigtable_admin/v2/setup.py deleted file mode 100644 index 755cbb49b..000000000 --- a/owl-bot-staging/bigtable_admin/v2/setup.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import io -import os -import re - -import setuptools # type: ignore - -package_root = os.path.abspath(os.path.dirname(__file__)) - -name = 'google-cloud-bigtable-admin' - - -description = "Google Cloud Bigtable Admin API client library" - -version = None - -with open(os.path.join(package_root, 'google/cloud/bigtable_admin/gapic_version.py')) as fp: - version_candidates = re.findall(r"(?<=\")\d+.\d+.\d+(?=\")", fp.read()) - assert (len(version_candidates) == 1) - version = version_candidates[0] - -if version[0] == "0": - release_status = "Development Status :: 4 - Beta" -else: - release_status = "Development Status :: 5 - Production/Stable" - -dependencies = [ - "google-api-core[grpc] >= 1.34.0, <3.0.0dev,!=2.0.*,!=2.1.*,!=2.2.*,!=2.3.*,!=2.4.*,!=2.5.*,!=2.6.*,!=2.7.*,!=2.8.*,!=2.9.*,!=2.10.*", - "proto-plus >= 1.22.3, <2.0.0dev", - "protobuf>=3.19.5,<5.0.0dev,!=3.20.0,!=3.20.1,!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5", - "grpc-google-iam-v1 >= 0.12.4, <1.0.0dev", -] -url = "https://github.com/googleapis/google-cloud-python/tree/main/packages/google-cloud-bigtable-admin" - -package_root = os.path.abspath(os.path.dirname(__file__)) - -readme_filename = os.path.join(package_root, "README.rst") -with io.open(readme_filename, encoding="utf-8") as readme_file: - readme = readme_file.read() - -packages = [ - package - for package in setuptools.find_namespace_packages() - if package.startswith("google") -] - -setuptools.setup( - name=name, - version=version, - description=description, - long_description=readme, - author="Google LLC", - 
author_email="googleapis-packages@google.com", - license="Apache 2.0", - url=url, - classifiers=[ - release_status, - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python :: 3.11", - "Programming Language :: Python :: 3.12", - "Operating System :: OS Independent", - "Topic :: Internet", - ], - platforms="Posix; MacOS X; Windows", - packages=packages, - python_requires=">=3.7", - install_requires=dependencies, - include_package_data=True, - zip_safe=False, -) diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.10.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.10.txt deleted file mode 100644 index ad3f0fa58..000000000 --- a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.10.txt +++ /dev/null @@ -1,7 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf -grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.11.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.11.txt deleted file mode 100644 index ad3f0fa58..000000000 --- a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.11.txt +++ /dev/null @@ -1,7 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. 
-google-api-core -proto-plus -protobuf -grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.12.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.12.txt deleted file mode 100644 index ad3f0fa58..000000000 --- a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.12.txt +++ /dev/null @@ -1,7 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf -grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.7.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.7.txt deleted file mode 100644 index 44ffd0454..000000000 --- a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.7.txt +++ /dev/null @@ -1,10 +0,0 @@ -# This constraints file is used to check that lower bounds -# are correct in setup.py -# List all library dependencies and extras in this file. -# Pin the version to the lower bound. -# e.g., if setup.py has "google-cloud-foo >= 1.14.0, < 2.0.0dev", -# Then this file should have google-cloud-foo==1.14.0 -google-api-core==1.34.0 -proto-plus==1.22.3 -protobuf==3.19.5 -grpc-google-iam-v1==0.12.4 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.8.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.8.txt deleted file mode 100644 index ad3f0fa58..000000000 --- a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.8.txt +++ /dev/null @@ -1,7 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. 
-google-api-core -proto-plus -protobuf -grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.9.txt b/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.9.txt deleted file mode 100644 index ad3f0fa58..000000000 --- a/owl-bot-staging/bigtable_admin/v2/testing/constraints-3.9.txt +++ /dev/null @@ -1,7 +0,0 @@ -# -*- coding: utf-8 -*- -# This constraints file is required for unit tests. -# List all library dependencies and extras in this file. -google-api-core -proto-plus -protobuf -grpc-google-iam-v1 diff --git a/owl-bot-staging/bigtable_admin/v2/tests/__init__.py b/owl-bot-staging/bigtable_admin/v2/tests/__init__.py deleted file mode 100644 index 1b4db446e..000000000 --- a/owl-bot-staging/bigtable_admin/v2/tests/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/__init__.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/__init__.py deleted file mode 100644 index 1b4db446e..000000000 --- a/owl-bot-staging/bigtable_admin/v2/tests/unit/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/__init__.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/__init__.py deleted file mode 100644 index 1b4db446e..000000000 --- a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/__init__.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/__init__.py deleted file mode 100644 index 1b4db446e..000000000 --- a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/__init__.py +++ /dev/null @@ -1,16 +0,0 @@ - -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py deleted file mode 100644 index f71dccb3b..000000000 --- a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ /dev/null @@ -1,12241 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -import os -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # pragma: NO COVER -except ImportError: # pragma: NO COVER - import mock - -import grpc -from grpc.experimental import aio -from collections.abc import Iterable -from google.protobuf import json_format -import json -import math -import pytest -from google.api_core import api_core_version -from proto.marshal.rules.dates import DurationRule, TimestampRule -from proto.marshal.rules import wrappers -from requests import Response -from requests import Request, PreparedRequest -from requests.sessions import Session -from google.protobuf import json_format - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import MutualTLSChannelError -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import BigtableInstanceAdminAsyncClient -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import BigtableInstanceAdminClient -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import pagers -from google.cloud.bigtable_admin_v2.services.bigtable_instance_admin import transports -from google.cloud.bigtable_admin_v2.types import bigtable_instance_admin -from google.cloud.bigtable_admin_v2.types import common -from google.cloud.bigtable_admin_v2.types import instance -from google.cloud.bigtable_admin_v2.types import instance as gba_instance -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 
import options_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.oauth2 import service_account -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.type import expr_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - -# If default endpoint template is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint template so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint_template(client): - return "test.{UNIVERSE_DOMAIN}" if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) else client._DEFAULT_ENDPOINT_TEMPLATE - -# Anonymous Credentials with universe domain property. If no universe domain is provided, then -# the default universe domain is "googleapis.com". 
-class _AnonymousCredentialsWithUniverseDomain(ga_credentials.AnonymousCredentials): - def __init__(self, universe_domain="googleapis.com"): - super(_AnonymousCredentialsWithUniverseDomain, self).__init__() - self._universe_domain = universe_domain - - @property - def universe_domain(self): - return self._universe_domain - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert BigtableInstanceAdminClient._get_default_mtls_endpoint(None) is None - assert BigtableInstanceAdminClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert BigtableInstanceAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert BigtableInstanceAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert BigtableInstanceAdminClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - -def test__read_environment_variables(): - assert BigtableInstanceAdminClient._read_environment_variables() == (False, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - assert BigtableInstanceAdminClient._read_environment_variables() == (True, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - assert BigtableInstanceAdminClient._read_environment_variables() == (False, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError) as excinfo: - BigtableInstanceAdminClient._read_environment_variables() - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be 
either `true` or `false`" - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - assert BigtableInstanceAdminClient._read_environment_variables() == (False, "never", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - assert BigtableInstanceAdminClient._read_environment_variables() == (False, "always", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): - assert BigtableInstanceAdminClient._read_environment_variables() == (False, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - BigtableInstanceAdminClient._read_environment_variables() - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - - with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): - assert BigtableInstanceAdminClient._read_environment_variables() == (False, "auto", "foo.com") - -def test__get_client_cert_source(): - mock_provided_cert_source = mock.Mock() - mock_default_cert_source = mock.Mock() - - assert BigtableInstanceAdminClient._get_client_cert_source(None, False) is None - assert BigtableInstanceAdminClient._get_client_cert_source(mock_provided_cert_source, False) is None - assert BigtableInstanceAdminClient._get_client_cert_source(mock_provided_cert_source, True) == mock_provided_cert_source - - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_default_cert_source): - assert BigtableInstanceAdminClient._get_client_cert_source(None, True) is mock_default_cert_source - assert BigtableInstanceAdminClient._get_client_cert_source(mock_provided_cert_source, "true") is mock_provided_cert_source - -@mock.patch.object(BigtableInstanceAdminClient, 
"_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableInstanceAdminClient)) -@mock.patch.object(BigtableInstanceAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableInstanceAdminAsyncClient)) -def test__get_api_endpoint(): - api_override = "foo.com" - mock_client_cert_source = mock.Mock() - default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - default_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=default_universe) - mock_universe = "bar.com" - mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=mock_universe) - - assert BigtableInstanceAdminClient._get_api_endpoint(api_override, mock_client_cert_source, default_universe, "always") == api_override - assert BigtableInstanceAdminClient._get_api_endpoint(None, mock_client_cert_source, default_universe, "auto") == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - assert BigtableInstanceAdminClient._get_api_endpoint(None, None, default_universe, "auto") == default_endpoint - assert BigtableInstanceAdminClient._get_api_endpoint(None, None, default_universe, "always") == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - assert BigtableInstanceAdminClient._get_api_endpoint(None, mock_client_cert_source, default_universe, "always") == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT - assert BigtableInstanceAdminClient._get_api_endpoint(None, None, mock_universe, "never") == mock_endpoint - assert BigtableInstanceAdminClient._get_api_endpoint(None, None, default_universe, "never") == default_endpoint - - with pytest.raises(MutualTLSChannelError) as excinfo: - BigtableInstanceAdminClient._get_api_endpoint(None, mock_client_cert_source, mock_universe, "auto") - assert str(excinfo.value) == "mTLS is not supported in any universe other than googleapis.com." 
- -def test__get_universe_domain(): - client_universe_domain = "foo.com" - universe_domain_env = "bar.com" - - assert BigtableInstanceAdminClient._get_universe_domain(client_universe_domain, universe_domain_env) == client_universe_domain - assert BigtableInstanceAdminClient._get_universe_domain(None, universe_domain_env) == universe_domain_env - assert BigtableInstanceAdminClient._get_universe_domain(None, None) == BigtableInstanceAdminClient._DEFAULT_UNIVERSE - - with pytest.raises(ValueError) as excinfo: - BigtableInstanceAdminClient._get_universe_domain("", None) - assert str(excinfo.value) == "Universe Domain cannot be an empty string." - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc"), - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest"), -]) -def test__validate_universe_domain(client_class, transport_class, transport_name): - client = client_class( - transport=transport_class( - credentials=_AnonymousCredentialsWithUniverseDomain() - ) - ) - assert client._validate_universe_domain() == True - - # Test the case when universe is already validated. - assert client._validate_universe_domain() == True - - if transport_name == "grpc": - # Test the case where credentials are provided by the - # `local_channel_credentials`. The default universes in both match. - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - client = client_class(transport=transport_class(channel=channel)) - assert client._validate_universe_domain() == True - - # Test the case where credentials do not exist: e.g. a transport is provided - # with no credentials. Validation should still succeed because there is no - # mismatch with non-existent credentials. 
- channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - transport=transport_class(channel=channel) - transport._credentials = None - client = client_class(transport=transport) - assert client._validate_universe_domain() == True - - # Test the case when there is a universe mismatch from the credentials. - client = client_class( - transport=transport_class(credentials=_AnonymousCredentialsWithUniverseDomain(universe_domain="foo.com")) - ) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert str(excinfo.value) == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - - # Test the case when there is a universe mismatch from the client. - # - # TODO: Make this test unconditional once the minimum supported version of - # google-api-core becomes 2.15.0 or higher. - api_core_major, api_core_minor, _ = [int(part) for part in api_core_version.__version__.split(".")] - if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): - client = client_class(client_options={"universe_domain": "bar.com"}, transport=transport_class(credentials=_AnonymousCredentialsWithUniverseDomain(),)) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert str(excinfo.value) == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
- -@pytest.mark.parametrize("client_class,transport_name", [ - (BigtableInstanceAdminClient, "grpc"), - (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), - (BigtableInstanceAdminClient, "rest"), -]) -def test_bigtable_instance_admin_client_from_service_account_info(client_class, transport_name): - creds = _AnonymousCredentialsWithUniverseDomain() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info, transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'bigtableadmin.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://bigtableadmin.googleapis.com' - ) - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.BigtableInstanceAdminGrpcTransport, "grpc"), - (transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.BigtableInstanceAdminRestTransport, "rest"), -]) -def test_bigtable_instance_admin_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class,transport_name", [ - (BigtableInstanceAdminClient, "grpc"), - (BigtableInstanceAdminAsyncClient, "grpc_asyncio"), - (BigtableInstanceAdminClient, "rest"), -]) -def 
test_bigtable_instance_admin_client_from_service_account_file(client_class, transport_name): - creds = _AnonymousCredentialsWithUniverseDomain() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'bigtableadmin.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://bigtableadmin.googleapis.com' - ) - - -def test_bigtable_instance_admin_client_get_transport_class(): - transport = BigtableInstanceAdminClient.get_transport_class() - available_transports = [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminRestTransport, - ] - assert transport in available_transports - - transport = BigtableInstanceAdminClient.get_transport_class("grpc") - assert transport == transports.BigtableInstanceAdminGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc"), - (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest"), -]) -@mock.patch.object(BigtableInstanceAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableInstanceAdminClient)) -@mock.patch.object(BigtableInstanceAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableInstanceAdminAsyncClient)) -def 
test_bigtable_instance_admin_client_client_options(client_class, transport_class, transport_name): - # Check that if channel is provided we won't create a new one. - with mock.patch.object(BigtableInstanceAdminClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=_AnonymousCredentialsWithUniverseDomain() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(BigtableInstanceAdminClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client = client_class(transport=transport_name) - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError) as excinfo: - client = client_class(transport=transport_name) - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - # Check the case api_endpoint is provided - options = client_options.ClientOptions(api_audience="https://language.googleapis.com") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience="https://language.googleapis.com" - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc", "true"), - (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (BigtableInstanceAdminClient, 
transports.BigtableInstanceAdminGrpcTransport, "grpc", "false"), - (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", "false"), - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest", "true"), - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest", "false"), -]) -@mock.patch.object(BigtableInstanceAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableInstanceAdminClient)) -@mock.patch.object(BigtableInstanceAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableInstanceAdminAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_bigtable_instance_admin_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE) - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE) - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class", [ - BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient -]) -@mock.patch.object(BigtableInstanceAdminClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableInstanceAdminClient)) -@mock.patch.object(BigtableInstanceAdminAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableInstanceAdminAsyncClient)) -def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client_class): - mock_client_cert_source = mock.Mock() - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source == mock_client_cert_source - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - mock_client_cert_source = mock.Mock() - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source == mock_client_cert_source - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - -@pytest.mark.parametrize("client_class", [ - BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient -]) -@mock.patch.object(BigtableInstanceAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableInstanceAdminClient)) -@mock.patch.object(BigtableInstanceAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableInstanceAdminAsyncClient)) -def test_bigtable_instance_admin_client_client_api_endpoint(client_class): - mock_client_cert_source = client_cert_source_callback - api_override = "foo.com" - default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE - default_endpoint = 
BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=default_universe) - mock_universe = "bar.com" - mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=mock_universe) - - # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", - # use ClientOptions.api_endpoint as the api endpoint regardless. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"): - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=api_override) - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == api_override - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == default_endpoint - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", - # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - - # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), - # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, - # and ClientOptions.universe_domain="bar.com", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
- options = client_options.ClientOptions() - universe_exists = hasattr(options, "universe_domain") - if universe_exists: - options = client_options.ClientOptions(universe_domain=mock_universe) - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - else: - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == (mock_endpoint if universe_exists else default_endpoint) - assert client.universe_domain == (mock_universe if universe_exists else default_universe) - - # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - options = client_options.ClientOptions() - if hasattr(options, "universe_domain"): - delattr(options, "universe_domain") - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == default_endpoint - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc"), - (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio"), - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest"), -]) -def test_bigtable_instance_admin_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc", grpc_helpers), - (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminRestTransport, "rest", None), -]) -def test_bigtable_instance_admin_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -def test_bigtable_instance_admin_client_client_options_from_dict(): - with mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = BigtableInstanceAdminClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport, "grpc", grpc_helpers), - (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), -]) -def test_bigtable_instance_admin_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # test that the credentials from file are saved and used as the credentials. - with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel" - ) as create_channel: - creds = _AnonymousCredentialsWithUniverseDomain() - file_creds = _AnonymousCredentialsWithUniverseDomain() - load_creds.return_value = (file_creds, None) - adc.return_value = (creds, None) - client = client_class(client_options=options, transport=transport_name) - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=file_creds, - credentials_file=None, - quota_project_id=None, - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.cluster', - 'https://www.googleapis.com/auth/bigtable.admin.instance', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - scopes=None, - default_host="bigtableadmin.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - 
-@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.CreateInstanceRequest, - dict, -]) -def test_create_instance(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_instance), - '__call__') as call: - client.create_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() - -@pytest.mark.asyncio -async def test_create_instance_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.CreateInstanceRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateInstanceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_instance_async_from_dict(): - await test_create_instance_async(request_type=dict) - - -def test_create_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateInstanceRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_instance), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateInstanceRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_instance), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_instance_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_instance), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_instance( - parent='parent_value', - instance_id='instance_id_value', - instance=gba_instance.Instance(name='name_value'), - clusters={'key_value': gba_instance.Cluster(name='name_value')}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].instance_id - mock_val = 'instance_id_value' - assert arg == mock_val - arg = args[0].instance - mock_val = gba_instance.Instance(name='name_value') - assert arg == mock_val - arg = args[0].clusters - mock_val = {'key_value': gba_instance.Cluster(name='name_value')} - assert arg == mock_val - - -def test_create_instance_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_instance( - bigtable_instance_admin.CreateInstanceRequest(), - parent='parent_value', - instance_id='instance_id_value', - instance=gba_instance.Instance(name='name_value'), - clusters={'key_value': gba_instance.Cluster(name='name_value')}, - ) - -@pytest.mark.asyncio -async def test_create_instance_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_instance), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_instance( - parent='parent_value', - instance_id='instance_id_value', - instance=gba_instance.Instance(name='name_value'), - clusters={'key_value': gba_instance.Cluster(name='name_value')}, - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].instance_id - mock_val = 'instance_id_value' - assert arg == mock_val - arg = args[0].instance - mock_val = gba_instance.Instance(name='name_value') - assert arg == mock_val - arg = args[0].clusters - mock_val = {'key_value': gba_instance.Cluster(name='name_value')} - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_instance_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_instance( - bigtable_instance_admin.CreateInstanceRequest(), - parent='parent_value', - instance_id='instance_id_value', - instance=gba_instance.Instance(name='name_value'), - clusters={'key_value': gba_instance.Cluster(name='name_value')}, - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.GetInstanceRequest, - dict, -]) -def test_get_instance(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Instance( - name='name_value', - display_name='display_name_value', - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - response = client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - - -def test_get_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_instance), - '__call__') as call: - client.get_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() - -@pytest.mark.asyncio -async def test_get_instance_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.GetInstanceRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance( - name='name_value', - display_name='display_name_value', - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - )) - response = await client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetInstanceRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Instance) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - - -@pytest.mark.asyncio -async def test_get_instance_async_from_dict(): - await test_get_instance_async(request_type=dict) - - -def test_get_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetInstanceRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_instance), - '__call__') as call: - call.return_value = instance.Instance() - client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetInstanceRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_instance), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) - await client.get_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_instance_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Instance() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_instance( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_instance_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_instance( - bigtable_instance_admin.GetInstanceRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_instance_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Instance() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_instance( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_instance_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_instance( - bigtable_instance_admin.GetInstanceRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.ListInstancesRequest, - dict, -]) -def test_list_instances(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_instances), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = bigtable_instance_admin.ListInstancesResponse( - failed_locations=['failed_locations_value'], - next_page_token='next_page_token_value', - ) - response = client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() - - # Establish that the response is the type that we expect. - assert response.raw_page is response - assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ['failed_locations_value'] - assert response.next_page_token == 'next_page_token_value' - - -def test_list_instances_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_instances), - '__call__') as call: - client.list_instances() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() - -@pytest.mark.asyncio -async def test_list_instances_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.ListInstancesRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_instances), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListInstancesResponse( - failed_locations=['failed_locations_value'], - next_page_token='next_page_token_value', - )) - response = await client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListInstancesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ['failed_locations_value'] - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_instances_async_from_dict(): - await test_list_instances_async(request_type=dict) - - -def test_list_instances_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListInstancesRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_instances), - '__call__') as call: - call.return_value = bigtable_instance_admin.ListInstancesResponse() - client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_instances_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListInstancesRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_instances), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListInstancesResponse()) - await client.list_instances(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_instances_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_instances), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListInstancesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_instances( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_instances_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_instances( - bigtable_instance_admin.ListInstancesRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_instances_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_instances), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListInstancesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListInstancesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_instances( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_instances_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.list_instances( - bigtable_instance_admin.ListInstancesRequest(), - parent='parent_value', - ) - - -@pytest.mark.parametrize("request_type", [ - instance.Instance, - dict, -]) -def test_update_instance(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Instance( - name='name_value', - display_name='display_name_value', - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - response = client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - - -def test_update_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_instance), - '__call__') as call: - client.update_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() - -@pytest.mark.asyncio -async def test_update_instance_async(transport: str = 'grpc_asyncio', request_type=instance.Instance): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance( - name='name_value', - display_name='display_name_value', - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - )) - response = await client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Instance() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Instance) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - - -@pytest.mark.asyncio -async def test_update_instance_async_from_dict(): - await test_update_instance_async(request_type=dict) - - -def test_update_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = instance.Instance() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_instance), - '__call__') as call: - call.return_value = instance.Instance() - client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = instance.Instance() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_instance), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Instance()) - await client.update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.PartialUpdateInstanceRequest, - dict, -]) -def test_partial_update_instance(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_partial_update_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), - '__call__') as call: - client.partial_update_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() - -@pytest.mark.asyncio -async def test_partial_update_instance_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.PartialUpdateInstanceRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateInstanceRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_partial_update_instance_async_from_dict(): - await test_partial_update_instance_async(request_type=dict) - - -def test_partial_update_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.PartialUpdateInstanceRequest() - - request.instance.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'instance.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_partial_update_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.PartialUpdateInstanceRequest() - - request.instance.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.partial_update_instance), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.partial_update_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'instance.name=name_value', - ) in kw['metadata'] - - -def test_partial_update_instance_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.partial_update_instance( - instance=gba_instance.Instance(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].instance - mock_val = gba_instance.Instance(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_partial_update_instance_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.partial_update_instance( - bigtable_instance_admin.PartialUpdateInstanceRequest(), - instance=gba_instance.Instance(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - -@pytest.mark.asyncio -async def test_partial_update_instance_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.partial_update_instance( - instance=gba_instance.Instance(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].instance - mock_val = gba_instance.Instance(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_partial_update_instance_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.partial_update_instance( - bigtable_instance_admin.PartialUpdateInstanceRequest(), - instance=gba_instance.Instance(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.DeleteInstanceRequest, - dict, -]) -def test_delete_instance(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_instance_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_instance), - '__call__') as call: - client.delete_instance() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() - -@pytest.mark.asyncio -async def test_delete_instance_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.DeleteInstanceRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteInstanceRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_instance_async_from_dict(): - await test_delete_instance_async(request_type=dict) - - -def test_delete_instance_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteInstanceRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_instance), - '__call__') as call: - call.return_value = None - client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_instance_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteInstanceRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_instance), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_instance(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_instance_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.delete_instance( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_instance_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_instance_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_instance), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_instance( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_instance_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.CreateClusterRequest, - dict, -]) -def test_create_cluster(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - client.create_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() - -@pytest.mark.asyncio -async def test_create_cluster_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.CreateClusterRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_cluster_async_from_dict(): - await test_create_cluster_async(request_type=dict) - - -def test_create_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateClusterRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateClusterRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_cluster_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_cluster( - parent='parent_value', - cluster_id='cluster_id_value', - cluster=instance.Cluster(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].cluster_id - mock_val = 'cluster_id_value' - assert arg == mock_val - arg = args[0].cluster - mock_val = instance.Cluster(name='name_value') - assert arg == mock_val - - -def test_create_cluster_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_cluster( - bigtable_instance_admin.CreateClusterRequest(), - parent='parent_value', - cluster_id='cluster_id_value', - cluster=instance.Cluster(name='name_value'), - ) - -@pytest.mark.asyncio -async def test_create_cluster_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.create_cluster( - parent='parent_value', - cluster_id='cluster_id_value', - cluster=instance.Cluster(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].cluster_id - mock_val = 'cluster_id_value' - assert arg == mock_val - arg = args[0].cluster - mock_val = instance.Cluster(name='name_value') - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_cluster_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_cluster( - bigtable_instance_admin.CreateClusterRequest(), - parent='parent_value', - cluster_id='cluster_id_value', - cluster=instance.Cluster(name='name_value'), - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.GetClusterRequest, - dict, -]) -def test_get_cluster(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = instance.Cluster( - name='name_value', - location='location_value', - state=instance.Cluster.State.READY, - serve_nodes=1181, - default_storage_type=common.StorageType.SSD, - ) - response = client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Cluster) - assert response.name == 'name_value' - assert response.location == 'location_value' - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert response.default_storage_type == common.StorageType.SSD - - -def test_get_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - client.get_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() - -@pytest.mark.asyncio -async def test_get_cluster_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.GetClusterRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster( - name='name_value', - location='location_value', - state=instance.Cluster.State.READY, - serve_nodes=1181, - default_storage_type=common.StorageType.SSD, - )) - response = await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Cluster) - assert response.name == 'name_value' - assert response.location == 'location_value' - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert response.default_storage_type == common.StorageType.SSD - - -@pytest.mark.asyncio -async def test_get_cluster_async_from_dict(): - await test_get_cluster_async(request_type=dict) - - -def test_get_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetClusterRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - call.return_value = instance.Cluster() - client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetClusterRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) - await client.get_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_cluster_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Cluster() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_cluster( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_cluster_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_cluster_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.Cluster() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.Cluster()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_cluster( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_cluster_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.ListClustersRequest, - dict, -]) -def test_list_clusters(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListClustersResponse( - failed_locations=['failed_locations_value'], - next_page_token='next_page_token_value', - ) - response = client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() - - # Establish that the response is the type that we expect. - assert response.raw_page is response - assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ['failed_locations_value'] - assert response.next_page_token == 'next_page_token_value' - - -def test_list_clusters_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - client.list_clusters() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() - -@pytest.mark.asyncio -async def test_list_clusters_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.ListClustersRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListClustersResponse( - failed_locations=['failed_locations_value'], - next_page_token='next_page_token_value', - )) - response = await client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListClustersRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ['failed_locations_value'] - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_clusters_async_from_dict(): - await test_list_clusters_async(request_type=dict) - - -def test_list_clusters_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = bigtable_instance_admin.ListClustersRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - call.return_value = bigtable_instance_admin.ListClustersResponse() - client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_clusters_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListClustersRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListClustersResponse()) - await client.list_clusters(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_clusters_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListClustersResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_clusters( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_clusters_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_clusters_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_clusters), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListClustersResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListClustersResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_clusters( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_clusters_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), - parent='parent_value', - ) - - -@pytest.mark.parametrize("request_type", [ - instance.Cluster, - dict, -]) -def test_update_cluster(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - client.update_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() - -@pytest.mark.asyncio -async def test_update_cluster_async(transport: str = 'grpc_asyncio', request_type=instance.Cluster): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == instance.Cluster() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_cluster_async_from_dict(): - await test_update_cluster_async(request_type=dict) - - -def test_update_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = instance.Cluster() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = instance.Cluster() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.PartialUpdateClusterRequest, - dict, -]) -def test_partial_update_cluster(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_partial_update_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), - '__call__') as call: - client.partial_update_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() - -@pytest.mark.asyncio -async def test_partial_update_cluster_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.PartialUpdateClusterRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.partial_update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.PartialUpdateClusterRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_partial_update_cluster_async_from_dict(): - await test_partial_update_cluster_async(request_type=dict) - - -def test_partial_update_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.PartialUpdateClusterRequest() - - request.cluster.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'cluster.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_partial_update_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.PartialUpdateClusterRequest() - - request.cluster.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.partial_update_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'cluster.name=name_value', - ) in kw['metadata'] - - -def test_partial_update_cluster_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.partial_update_cluster( - cluster=instance.Cluster(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].cluster - mock_val = instance.Cluster(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_partial_update_cluster_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.partial_update_cluster( - bigtable_instance_admin.PartialUpdateClusterRequest(), - cluster=instance.Cluster(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - -@pytest.mark.asyncio -async def test_partial_update_cluster_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.partial_update_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.partial_update_cluster( - cluster=instance.Cluster(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].cluster - mock_val = instance.Cluster(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_partial_update_cluster_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.partial_update_cluster( - bigtable_instance_admin.PartialUpdateClusterRequest(), - cluster=instance.Cluster(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.DeleteClusterRequest, - dict, -]) -def test_delete_cluster(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_cluster_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - client.delete_cluster() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() - -@pytest.mark.asyncio -async def test_delete_cluster_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.DeleteClusterRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteClusterRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_cluster_async_from_dict(): - await test_delete_cluster_async(request_type=dict) - - -def test_delete_cluster_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_instance_admin.DeleteClusterRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - call.return_value = None - client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_cluster_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteClusterRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_cluster(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_cluster_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_cluster( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_cluster_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_cluster_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_cluster), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_cluster( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_cluster_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.CreateAppProfileRequest, - dict, -]) -def test_create_app_profile(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile( - name='name_value', - etag='etag_value', - description='description_value', - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - response = client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.description == 'description_value' - - -def test_create_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_app_profile), - '__call__') as call: - client.create_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() - -@pytest.mark.asyncio -async def test_create_app_profile_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.CreateAppProfileRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile( - name='name_value', - etag='etag_value', - description='description_value', - )) - response = await client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.CreateAppProfileRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_create_app_profile_async_from_dict(): - await test_create_app_profile_async(request_type=dict) - - -def test_create_app_profile_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = bigtable_instance_admin.CreateAppProfileRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), - '__call__') as call: - call.return_value = instance.AppProfile() - client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_app_profile_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.CreateAppProfileRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - await client.create_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_app_profile_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_app_profile( - parent='parent_value', - app_profile_id='app_profile_id_value', - app_profile=instance.AppProfile(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - arg = args[0].app_profile - mock_val = instance.AppProfile(name='name_value') - assert arg == mock_val - - -def test_create_app_profile_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_app_profile( - bigtable_instance_admin.CreateAppProfileRequest(), - parent='parent_value', - app_profile_id='app_profile_id_value', - app_profile=instance.AppProfile(name='name_value'), - ) - -@pytest.mark.asyncio -async def test_create_app_profile_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = instance.AppProfile() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_app_profile( - parent='parent_value', - app_profile_id='app_profile_id_value', - app_profile=instance.AppProfile(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].app_profile_id - mock_val = 'app_profile_id_value' - assert arg == mock_val - arg = args[0].app_profile - mock_val = instance.AppProfile(name='name_value') - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_app_profile_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_app_profile( - bigtable_instance_admin.CreateAppProfileRequest(), - parent='parent_value', - app_profile_id='app_profile_id_value', - app_profile=instance.AppProfile(name='name_value'), - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.GetAppProfileRequest, - dict, -]) -def test_get_app_profile(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile( - name='name_value', - etag='etag_value', - description='description_value', - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - response = client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.description == 'description_value' - - -def test_get_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_app_profile), - '__call__') as call: - client.get_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() - -@pytest.mark.asyncio -async def test_get_app_profile_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.GetAppProfileRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile( - name='name_value', - etag='etag_value', - description='description_value', - )) - response = await client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.GetAppProfileRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_app_profile_async_from_dict(): - await test_get_app_profile_async(request_type=dict) - - -def test_get_app_profile_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetAppProfileRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_app_profile), - '__call__') as call: - call.return_value = instance.AppProfile() - client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_app_profile_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.GetAppProfileRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_app_profile), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - await client.get_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_app_profile_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_app_profile( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_app_profile_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_app_profile_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = instance.AppProfile() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(instance.AppProfile()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_app_profile( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_app_profile_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.ListAppProfilesRequest, - dict, -]) -def test_list_app_profiles(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListAppProfilesResponse( - next_page_token='next_page_token_value', - failed_locations=['failed_locations_value'], - ) - response = client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListAppProfilesPager) - assert response.next_page_token == 'next_page_token_value' - assert response.failed_locations == ['failed_locations_value'] - - -def test_list_app_profiles_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__') as call: - client.list_app_profiles() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() - -@pytest.mark.asyncio -async def test_list_app_profiles_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.ListAppProfilesRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListAppProfilesResponse( - next_page_token='next_page_token_value', - failed_locations=['failed_locations_value'], - )) - response = await client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListAppProfilesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListAppProfilesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - assert response.failed_locations == ['failed_locations_value'] - - -@pytest.mark.asyncio -async def test_list_app_profiles_async_from_dict(): - await test_list_app_profiles_async(request_type=dict) - - -def test_list_app_profiles_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListAppProfilesRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__') as call: - call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_app_profiles_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListAppProfilesRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListAppProfilesResponse()) - await client.list_app_profiles(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_app_profiles_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_app_profiles( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_app_profiles_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_app_profiles_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListAppProfilesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListAppProfilesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_app_profiles( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_app_profiles_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), - parent='parent_value', - ) - - -def test_list_app_profiles_pager(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token='def', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_app_profiles(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.AppProfile) - for i in results) -def test_list_app_profiles_pages(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token='def', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - RuntimeError, - ) - pages = list(client.list_app_profiles(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_app_profiles_async_pager(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token='def', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_app_profiles(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, instance.AppProfile) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_app_profiles_async_pages(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_app_profiles), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token='def', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_app_profiles(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.UpdateAppProfileRequest, - dict, -]) -def test_update_app_profile(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_update_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), - '__call__') as call: - client.update_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() - -@pytest.mark.asyncio -async def test_update_app_profile_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.UpdateAppProfileRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.UpdateAppProfileRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_app_profile_async_from_dict(): - await test_update_app_profile_async(request_type=dict) - - -def test_update_app_profile_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.UpdateAppProfileRequest() - - request.app_profile.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'app_profile.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_app_profile_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.UpdateAppProfileRequest() - - request.app_profile.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_app_profile(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'app_profile.name=name_value', - ) in kw['metadata'] - - -def test_update_app_profile_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_app_profile( - app_profile=instance.AppProfile(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].app_profile - mock_val = instance.AppProfile(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_app_profile_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_app_profile( - bigtable_instance_admin.UpdateAppProfileRequest(), - app_profile=instance.AppProfile(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - -@pytest.mark.asyncio -async def test_update_app_profile_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_app_profile( - app_profile=instance.AppProfile(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].app_profile - mock_val = instance.AppProfile(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_update_app_profile_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_app_profile( - bigtable_instance_admin.UpdateAppProfileRequest(), - app_profile=instance.AppProfile(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.DeleteAppProfileRequest, - dict, -]) -def test_delete_app_profile(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_app_profile_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_app_profile), - '__call__') as call: - client.delete_app_profile() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() - -@pytest.mark.asyncio -async def test_delete_app_profile_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.DeleteAppProfileRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.DeleteAppProfileRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_app_profile_async_from_dict(): - await test_delete_app_profile_async(request_type=dict) - - -def test_delete_app_profile_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteAppProfileRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_app_profile), - '__call__') as call: - call.return_value = None - client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_app_profile_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.DeleteAppProfileRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_app_profile(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_app_profile_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.delete_app_profile( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_app_profile_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_app_profile_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_app_profile), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_app_profile( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_app_profile_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.GetIamPolicyRequest, - dict, -]) -def test_get_iam_policy(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b'etag_blob', - ) - response = client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -def test_get_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - client.get_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - -@pytest.mark.asyncio -async def test_get_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.GetIamPolicyRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy( - version=774, - etag=b'etag_blob', - )) - response = await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -@pytest.mark.asyncio -async def test_get_iam_policy_async_from_dict(): - await test_get_iam_policy_async(request_type=dict) - - -def test_get_iam_policy_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - call.return_value = policy_pb2.Policy() - client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_iam_policy_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - -def test_get_iam_policy_from_dict_foreign(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = policy_pb2.Policy() - response = client.get_iam_policy(request={ - 'resource': 'resource_value', - 'options': options_pb2.GetPolicyOptions(requested_policy_version=2598), - } - ) - call.assert_called() - - -def test_get_iam_policy_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_iam_policy( - resource='resource_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - - -def test_get_iam_policy_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource='resource_value', - ) - -@pytest.mark.asyncio -async def test_get_iam_policy_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = policy_pb2.Policy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_iam_policy( - resource='resource_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_iam_policy_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource='resource_value', - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.SetIamPolicyRequest, - dict, -]) -def test_set_iam_policy(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b'etag_blob', - ) - response = client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -def test_set_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - client.set_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - -@pytest.mark.asyncio -async def test_set_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.SetIamPolicyRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy( - version=774, - etag=b'etag_blob', - )) - response = await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -@pytest.mark.asyncio -async def test_set_iam_policy_async_from_dict(): - await test_set_iam_policy_async(request_type=dict) - - -def test_set_iam_policy_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - call.return_value = policy_pb2.Policy() - client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_iam_policy_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - -def test_set_iam_policy_from_dict_foreign(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.set_iam_policy(request={ - 'resource': 'resource_value', - 'policy': policy_pb2.Policy(version=774), - 'update_mask': field_mask_pb2.FieldMask(paths=['paths_value']), - } - ) - call.assert_called() - - -def test_set_iam_policy_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.set_iam_policy( - resource='resource_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - - -def test_set_iam_policy_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource='resource_value', - ) - -@pytest.mark.asyncio -async def test_set_iam_policy_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.set_iam_policy( - resource='resource_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_set_iam_policy_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource='resource_value', - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, -]) -def test_test_iam_permissions(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=['permissions_value'], - ) - response = client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ['permissions_value'] - - -def test_test_iam_permissions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - client.test_iam_permissions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - -@pytest.mark.asyncio -async def test_test_iam_permissions_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.TestIamPermissionsRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse( - permissions=['permissions_value'], - )) - response = await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ['permissions_value'] - - -@pytest.mark.asyncio -async def test_test_iam_permissions_async_from_dict(): - await test_test_iam_permissions_async(request_type=dict) - - -def test_test_iam_permissions_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_test_iam_permissions_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse()) - await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - -def test_test_iam_permissions_from_dict_foreign(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - response = client.test_iam_permissions(request={ - 'resource': 'resource_value', - 'permissions': ['permissions_value'], - } - ) - call.assert_called() - - -def test_test_iam_permissions_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.test_iam_permissions( - resource='resource_value', - permissions=['permissions_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - arg = args[0].permissions - mock_val = ['permissions_value'] - assert arg == mock_val - - -def test_test_iam_permissions_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource='resource_value', - permissions=['permissions_value'], - ) - -@pytest.mark.asyncio -async def test_test_iam_permissions_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.test_iam_permissions( - resource='resource_value', - permissions=['permissions_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - arg = args[0].permissions - mock_val = ['permissions_value'] - assert arg == mock_val - -@pytest.mark.asyncio -async def test_test_iam_permissions_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource='resource_value', - permissions=['permissions_value'], - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.ListHotTabletsRequest, - dict, -]) -def test_list_hot_tablets(request_type, transport: str = 'grpc'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListHotTabletsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListHotTabletsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_hot_tablets_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__') as call: - client.list_hot_tablets() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() - -@pytest.mark.asyncio -async def test_list_hot_tablets_async(transport: str = 'grpc_asyncio', request_type=bigtable_instance_admin.ListHotTabletsRequest): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListHotTabletsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_instance_admin.ListHotTabletsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListHotTabletsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_hot_tablets_async_from_dict(): - await test_list_hot_tablets_async(request_type=dict) - - -def test_list_hot_tablets_field_headers(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListHotTabletsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__') as call: - call.return_value = bigtable_instance_admin.ListHotTabletsResponse() - client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_hot_tablets_field_headers_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_instance_admin.ListHotTabletsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListHotTabletsResponse()) - await client.list_hot_tablets(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_hot_tablets_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListHotTabletsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_hot_tablets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_hot_tablets_flattened_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_hot_tablets_flattened_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_instance_admin.ListHotTabletsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_instance_admin.ListHotTabletsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_hot_tablets( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_hot_tablets_flattened_error_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), - parent='parent_value', - ) - - -def test_list_hot_tablets_pager(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token='def', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_hot_tablets(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.HotTablet) - for i in results) -def test_list_hot_tablets_pages(transport_name: str = "grpc"): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token='def', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - RuntimeError, - ) - pages = list(client.list_hot_tablets(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_hot_tablets_async_pager(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token='def', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_hot_tablets(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, instance.HotTablet) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_hot_tablets_async_pages(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_hot_tablets), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token='def', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_hot_tablets(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.CreateInstanceRequest, - dict, -]) -def test_create_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_instance(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_create_instance_rest_required_fields(request_type=bigtable_instance_admin.CreateInstanceRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["instance_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - jsonified_request["instanceId"] = 'instance_id_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - assert "instanceId" in jsonified_request - assert jsonified_request["instanceId"] == 'instance_id_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - 
request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_instance(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.create_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("parent", "instanceId", "instance", "clusters", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else 
transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_create_instance") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_create_instance") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.CreateInstanceRequest.pb(bigtable_instance_admin.CreateInstanceRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_instance_admin.CreateInstanceRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_instance_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.CreateInstanceRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_instance(request) - - -def test_create_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - instance_id='instance_id_value', - instance=gba_instance.Instance(name='name_value'), - clusters={'key_value': gba_instance.Cluster(name='name_value')}, - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1]) - - -def test_create_instance_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_instance( - bigtable_instance_admin.CreateInstanceRequest(), - parent='parent_value', - instance_id='instance_id_value', - instance=gba_instance.Instance(name='name_value'), - clusters={'key_value': gba_instance.Cluster(name='name_value')}, - ) - - -def test_create_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.GetInstanceRequest, - dict, -]) -def test_get_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = instance.Instance( - name='name_value', - display_name='display_name_value', - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_instance(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - - -def test_get_instance_rest_required_fields(request_type=bigtable_instance_admin.GetInstanceRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields 
with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.Instance() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_instance(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.get_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def 
test_get_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_get_instance") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_get_instance") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.GetInstanceRequest.pb(bigtable_instance_admin.GetInstanceRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.Instance.to_json(instance.Instance()) - - request = bigtable_instance_admin.GetInstanceRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Instance() - - client.get_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_instance_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.GetInstanceRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_instance(request) - - -def test_get_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = instance.Instance() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1]) - - -def test_get_instance_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_instance( - bigtable_instance_admin.GetInstanceRequest(), - name='name_value', - ) - - -def test_get_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.ListInstancesRequest, - dict, -]) -def test_list_instances_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse( - failed_locations=['failed_locations_value'], - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_instances(request) - - assert response.raw_page is response - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_instance_admin.ListInstancesResponse) - assert response.failed_locations == ['failed_locations_value'] - assert response.next_page_token == 'next_page_token_value' - - -def test_list_instances_rest_required_fields(request_type=bigtable_instance_admin.ListInstancesRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_instances._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_instances._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_instances(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_instances_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.list_instances._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_instances_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, 
"transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_list_instances") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_list_instances") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListInstancesRequest.pb(bigtable_instance_admin.ListInstancesRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_instance_admin.ListInstancesResponse.to_json(bigtable_instance_admin.ListInstancesResponse()) - - request = bigtable_instance_admin.ListInstancesRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListInstancesResponse() - - client.list_instances(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_instances_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.ListInstancesRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_instances(request) - - -def test_list_instances_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListInstancesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListInstancesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_instances(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*}/instances" % client.transport._host, args[1]) - - -def test_list_instances_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_instances( - bigtable_instance_admin.ListInstancesRequest(), - parent='parent_value', - ) - - -def test_list_instances_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - instance.Instance, - dict, -]) -def test_update_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = instance.Instance( - name='name_value', - display_name='display_name_value', - state=instance.Instance.State.READY, - type_=instance.Instance.Type.PRODUCTION, - satisfies_pzs=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_instance(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.Instance) - assert response.name == 'name_value' - assert response.display_name == 'display_name_value' - assert response.state == instance.Instance.State.READY - assert response.type_ == instance.Instance.Type.PRODUCTION - assert response.satisfies_pzs is True - - -def test_update_instance_rest_required_fields(request_type=instance.Instance): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["display_name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["displayName"] = 'display_name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify 
required fields with non-default values are left alone - assert "displayName" in jsonified_request - assert jsonified_request["displayName"] == 'display_name_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.Instance() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "put", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Instance.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.update_instance(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_update_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.update_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("displayName", ))) - - 
-@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_update_instance") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_update_instance") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = instance.Instance.pb(instance.Instance()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.Instance.to_json(instance.Instance()) - - request = instance.Instance() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Instance() - - client.update_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_instance_rest_bad_request(transport: str = 'rest', request_type=instance.Instance): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_instance(request) - - -def test_update_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.PartialUpdateInstanceRequest, - dict, -]) -def test_partial_update_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'instance': {'name': 'projects/sample1/instances/sample2'}} - request_init["instance"] = {'name': 'projects/sample1/instances/sample2', 'display_name': 'display_name_value', 'state': 1, 'type_': 1, 'labels': {}, 'create_time': {'seconds': 751, 'nanos': 543}, 'satisfies_pzs': True} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.PartialUpdateInstanceRequest.meta.fields["instance"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["instance"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["instance"][field])): - del request_init["instance"][field][i][subfield] - else: - del 
request_init["instance"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.partial_update_instance(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_partial_update_instance_rest_required_fields(request_type=bigtable_instance_admin.PartialUpdateInstanceRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).partial_update_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).partial_update_instance._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.partial_update_instance(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_partial_update_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.partial_update_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask", )) & set(("instance", "updateMask", ))) - - 
-@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_partial_update_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_instance") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_instance") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.PartialUpdateInstanceRequest.pb(bigtable_instance_admin.PartialUpdateInstanceRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_instance_admin.PartialUpdateInstanceRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.partial_update_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_partial_update_instance_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.PartialUpdateInstanceRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - 
- # send a request that will satisfy transcoding - request_init = {'instance': {'name': 'projects/sample1/instances/sample2'}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.partial_update_instance(request) - - -def test_partial_update_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'instance': {'name': 'projects/sample1/instances/sample2'}} - - # get truthy value for each flattened field - mock_args = dict( - instance=gba_instance.Instance(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.partial_update_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{instance.name=projects/*/instances/*}" % client.transport._host, args[1]) - - -def test_partial_update_instance_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.partial_update_instance( - bigtable_instance_admin.PartialUpdateInstanceRequest(), - instance=gba_instance.Instance(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_partial_update_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.DeleteInstanceRequest, - dict, -]) -def test_delete_instance_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_instance(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_instance_rest_required_fields(request_type=bigtable_instance_admin.DeleteInstanceRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_instance._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_instance(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_instance_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.delete_instance._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_instance_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_delete_instance") as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteInstanceRequest.pb(bigtable_instance_admin.DeleteInstanceRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_instance_admin.DeleteInstanceRequest() - metadata =[ 
- ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_instance(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - - -def test_delete_instance_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.DeleteInstanceRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_instance(request) - - -def test_delete_instance_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.delete_instance(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*}" % client.transport._host, args[1]) - - -def test_delete_instance_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_instance( - bigtable_instance_admin.DeleteInstanceRequest(), - name='name_value', - ) - - -def test_delete_instance_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.CreateClusterRequest, - dict, -]) -def test_create_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request_init["cluster"] = {'name': 'name_value', 'location': 'location_value', 'state': 1, 'serve_nodes': 1181, 'cluster_config': {'cluster_autoscaling_config': {'autoscaling_limits': {'min_serve_nodes': 1600, 'max_serve_nodes': 1602}, 'autoscaling_targets': {'cpu_utilization_percent': 2483, 'storage_utilization_gib_per_node': 3404}}}, 'default_storage_type': 1, 'encryption_config': {'kms_key_name': 'kms_key_name_value'}} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateClusterRequest.meta.fields["cluster"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["cluster"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["cluster"][field])): - del request_init["cluster"][field][i][subfield] - else: - del 
request_init["cluster"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_cluster(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_create_cluster_rest_required_fields(request_type=bigtable_instance_admin.CreateClusterRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["cluster_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - assert "clusterId" not in jsonified_request - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "clusterId" in jsonified_request - assert jsonified_request["clusterId"] == request_init["cluster_id"] - - jsonified_request["parent"] = 'parent_value' - jsonified_request["clusterId"] = 'cluster_id_value' - - unset_fields = 
transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("cluster_id", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - assert "clusterId" in jsonified_request - assert jsonified_request["clusterId"] == 'cluster_id_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_cluster(request) - - expected_params = [ - ( - "clusterId", - "", - ), - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.create_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(("clusterId", )) & set(("parent", "clusterId", "cluster", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_create_cluster") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_create_cluster") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = 
bigtable_instance_admin.CreateClusterRequest.pb(bigtable_instance_admin.CreateClusterRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_instance_admin.CreateClusterRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_cluster_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.CreateClusterRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_cluster(request) - - -def test_create_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - cluster_id='cluster_id_value', - cluster=instance.Cluster(name='name_value'), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, args[1]) - - -def test_create_cluster_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_cluster( - bigtable_instance_admin.CreateClusterRequest(), - parent='parent_value', - cluster_id='cluster_id_value', - cluster=instance.Cluster(name='name_value'), - ) - - -def test_create_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.GetClusterRequest, - dict, -]) -def test_get_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = instance.Cluster( - name='name_value', - location='location_value', - state=instance.Cluster.State.READY, - serve_nodes=1181, - default_storage_type=common.StorageType.SSD, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_cluster(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.Cluster) - assert response.name == 'name_value' - assert response.location == 'location_value' - assert response.state == instance.Cluster.State.READY - assert response.serve_nodes == 1181 - assert response.default_storage_type == common.StorageType.SSD - - -def test_get_cluster_rest_required_fields(request_type=bigtable_instance_admin.GetClusterRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.Cluster() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_cluster(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.get_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - 
mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_get_cluster") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_get_cluster") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.GetClusterRequest.pb(bigtable_instance_admin.GetClusterRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.Cluster.to_json(instance.Cluster()) - - request = bigtable_instance_admin.GetClusterRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.Cluster() - - client.get_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_cluster_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.GetClusterRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_cluster(request) - - -def test_get_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = instance.Cluster() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.Cluster.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, args[1]) - - -def test_get_cluster_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_cluster( - bigtable_instance_admin.GetClusterRequest(), - name='name_value', - ) - - -def test_get_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.ListClustersRequest, - dict, -]) -def test_list_clusters_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse( - failed_locations=['failed_locations_value'], - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_clusters(request) - - assert response.raw_page is response - - # Establish that the response is the type that we expect. 
- assert isinstance(response, bigtable_instance_admin.ListClustersResponse) - assert response.failed_locations == ['failed_locations_value'] - assert response.next_page_token == 'next_page_token_value' - - -def test_list_clusters_rest_required_fields(request_type=bigtable_instance_admin.ListClustersRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_clusters._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_clusters._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_clusters(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_clusters_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.list_clusters._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_clusters_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, 
"transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_list_clusters") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_list_clusters") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListClustersRequest.pb(bigtable_instance_admin.ListClustersRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_instance_admin.ListClustersResponse.to_json(bigtable_instance_admin.ListClustersResponse()) - - request = bigtable_instance_admin.ListClustersRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListClustersResponse() - - client.list_clusters(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_clusters_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.ListClustersRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_clusters(request) - - -def test_list_clusters_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListClustersResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListClustersResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_clusters(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/clusters" % client.transport._host, args[1]) - - -def test_list_clusters_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_clusters( - bigtable_instance_admin.ListClustersRequest(), - parent='parent_value', - ) - - -def test_list_clusters_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - instance.Cluster, - dict, -]) -def test_update_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_cluster(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_update_cluster") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_update_cluster") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = instance.Cluster.pb(instance.Cluster()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = instance.Cluster() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.update_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_cluster_rest_bad_request(transport: str = 'rest', request_type=instance.Cluster): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} 
- request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_cluster(request) - - -def test_update_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.PartialUpdateClusterRequest, - dict, -]) -def test_partial_update_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'cluster': {'name': 'projects/sample1/instances/sample2/clusters/sample3'}} - request_init["cluster"] = {'name': 'projects/sample1/instances/sample2/clusters/sample3', 'location': 'location_value', 'state': 1, 'serve_nodes': 1181, 'cluster_config': {'cluster_autoscaling_config': {'autoscaling_limits': {'min_serve_nodes': 1600, 'max_serve_nodes': 1602}, 'autoscaling_targets': {'cpu_utilization_percent': 2483, 'storage_utilization_gib_per_node': 3404}}}, 'default_storage_type': 1, 'encryption_config': {'kms_key_name': 'kms_key_name_value'}} - # The version of a generated dependency at test runtime may differ from the version used during generation. 
- # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.PartialUpdateClusterRequest.meta.fields["cluster"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. - message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["cluster"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency 
- # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["cluster"][field])): - del request_init["cluster"][field][i][subfield] - else: - del request_init["cluster"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.partial_update_cluster(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_partial_update_cluster_rest_required_fields(request_type=bigtable_instance_admin.PartialUpdateClusterRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).partial_update_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).partial_update_cluster._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.partial_update_cluster(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_partial_update_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.partial_update_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask", )) & set(("cluster", "updateMask", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_partial_update_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_partial_update_cluster") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_partial_update_cluster") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = 
bigtable_instance_admin.PartialUpdateClusterRequest.pb(bigtable_instance_admin.PartialUpdateClusterRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_instance_admin.PartialUpdateClusterRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.partial_update_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_partial_update_cluster_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.PartialUpdateClusterRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'cluster': {'name': 'projects/sample1/instances/sample2/clusters/sample3'}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.partial_update_cluster(request) - - -def test_partial_update_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'cluster': {'name': 'projects/sample1/instances/sample2/clusters/sample3'}} - - # get truthy value for each flattened field - mock_args = dict( - cluster=instance.Cluster(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.partial_update_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{cluster.name=projects/*/instances/*/clusters/*}" % client.transport._host, args[1]) - - -def test_partial_update_cluster_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.partial_update_cluster( - bigtable_instance_admin.PartialUpdateClusterRequest(), - cluster=instance.Cluster(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_partial_update_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.DeleteClusterRequest, - dict, -]) -def test_delete_cluster_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_cluster(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_cluster_rest_required_fields(request_type=bigtable_instance_admin.DeleteClusterRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_cluster._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_cluster(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_cluster_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.delete_cluster._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_cluster_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_delete_cluster") as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteClusterRequest.pb(bigtable_instance_admin.DeleteClusterRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_instance_admin.DeleteClusterRequest() - metadata =[ - 
("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_cluster(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - - -def test_delete_cluster_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.DeleteClusterRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_cluster(request) - - -def test_delete_cluster_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.delete_cluster(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*}" % client.transport._host, args[1]) - - -def test_delete_cluster_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_cluster( - bigtable_instance_admin.DeleteClusterRequest(), - name='name_value', - ) - - -def test_delete_cluster_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.CreateAppProfileRequest, - dict, -]) -def test_create_app_profile_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request_init["app_profile"] = {'name': 'name_value', 'etag': 'etag_value', 'description': 'description_value', 'multi_cluster_routing_use_any': {'cluster_ids': ['cluster_ids_value1', 'cluster_ids_value2']}, 'single_cluster_routing': {'cluster_id': 'cluster_id_value', 'allow_transactional_writes': True}, 'priority': 1, 'standard_isolation': {'priority': 1}} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.CreateAppProfileRequest.meta.fields["app_profile"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["app_profile"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["app_profile"][field])): - del request_init["app_profile"][field][i][subfield] - else: - del 
request_init["app_profile"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( - name='name_value', - etag='etag_value', - description='description_value', - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_app_profile(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, instance.AppProfile) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.description == 'description_value' - - -def test_create_app_profile_rest_required_fields(request_type=bigtable_instance_admin.CreateAppProfileRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["app_profile_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - assert "appProfileId" not in jsonified_request - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present 
- assert "appProfileId" in jsonified_request - assert jsonified_request["appProfileId"] == request_init["app_profile_id"] - - jsonified_request["parent"] = 'parent_value' - jsonified_request["appProfileId"] = 'app_profile_id_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("app_profile_id", "ignore_warnings", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - assert "appProfileId" in jsonified_request - assert jsonified_request["appProfileId"] == 'app_profile_id_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_app_profile(request) - - expected_params = [ - ( - "appProfileId", - "", - ), - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.create_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == (set(("appProfileId", "ignoreWarnings", )) & set(("parent", "appProfileId", "appProfile", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_create_app_profile") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_create_app_profile") as pre: - pre.assert_not_called() - post.assert_not_called() - 
pb_message = bigtable_instance_admin.CreateAppProfileRequest.pb(bigtable_instance_admin.CreateAppProfileRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.AppProfile.to_json(instance.AppProfile()) - - request = bigtable_instance_admin.CreateAppProfileRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.AppProfile() - - client.create_app_profile(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_app_profile_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.CreateAppProfileRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_app_profile(request) - - -def test_create_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = instance.AppProfile() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - app_profile_id='app_profile_id_value', - app_profile=instance.AppProfile(name='name_value'), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/appProfiles" % client.transport._host, args[1]) - - -def test_create_app_profile_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_app_profile( - bigtable_instance_admin.CreateAppProfileRequest(), - parent='parent_value', - app_profile_id='app_profile_id_value', - app_profile=instance.AppProfile(name='name_value'), - ) - - -def test_create_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.GetAppProfileRequest, - dict, -]) -def test_get_app_profile_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile( - name='name_value', - etag='etag_value', - description='description_value', - priority=instance.AppProfile.Priority.PRIORITY_LOW, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_app_profile(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, instance.AppProfile) - assert response.name == 'name_value' - assert response.etag == 'etag_value' - assert response.description == 'description_value' - - -def test_get_app_profile_rest_required_fields(request_type=bigtable_instance_admin.GetAppProfileRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_app_profile(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.get_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_get_app_profile") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_get_app_profile") as pre: - pre.assert_not_called() - 
post.assert_not_called() - pb_message = bigtable_instance_admin.GetAppProfileRequest.pb(bigtable_instance_admin.GetAppProfileRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = instance.AppProfile.to_json(instance.AppProfile()) - - request = bigtable_instance_admin.GetAppProfileRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = instance.AppProfile() - - client.get_app_profile(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_app_profile_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.GetAppProfileRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_app_profile(request) - - -def test_get_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = instance.AppProfile() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = instance.AppProfile.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/appProfiles/*}" % client.transport._host, args[1]) - - -def test_get_app_profile_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_app_profile( - bigtable_instance_admin.GetAppProfileRequest(), - name='name_value', - ) - - -def test_get_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.ListAppProfilesRequest, - dict, -]) -def test_list_app_profiles_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse( - next_page_token='next_page_token_value', - failed_locations=['failed_locations_value'], - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_app_profiles(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListAppProfilesPager) - assert response.next_page_token == 'next_page_token_value' - assert response.failed_locations == ['failed_locations_value'] - - -def test_list_app_profiles_rest_required_fields(request_type=bigtable_instance_admin.ListAppProfilesRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_app_profiles._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_app_profiles._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_app_profiles(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_app_profiles_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.list_app_profiles._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_app_profiles_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - 
mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_list_app_profiles") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_list_app_profiles") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListAppProfilesRequest.pb(bigtable_instance_admin.ListAppProfilesRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_instance_admin.ListAppProfilesResponse.to_json(bigtable_instance_admin.ListAppProfilesResponse()) - - request = bigtable_instance_admin.ListAppProfilesRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListAppProfilesResponse() - - client.list_app_profiles(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_app_profiles_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.ListAppProfilesRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_app_profiles(request) - - -def test_list_app_profiles_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListAppProfilesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListAppProfilesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_app_profiles(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/appProfiles" % client.transport._host, args[1]) - - -def test_list_app_profiles_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_app_profiles( - bigtable_instance_admin.ListAppProfilesRequest(), - parent='parent_value', - ) - - -def test_list_app_profiles_rest_pager(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - instance.AppProfile(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[], - next_page_token='def', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListAppProfilesResponse( - app_profiles=[ - instance.AppProfile(), - instance.AppProfile(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(bigtable_instance_admin.ListAppProfilesResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/instances/sample2'} - - pager = client.list_app_profiles(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.AppProfile) - for i in results) - - pages = list(client.list_app_profiles(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.UpdateAppProfileRequest, - dict, -]) -def test_update_app_profile_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'app_profile': {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'}} - 
request_init["app_profile"] = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3', 'etag': 'etag_value', 'description': 'description_value', 'multi_cluster_routing_use_any': {'cluster_ids': ['cluster_ids_value1', 'cluster_ids_value2']}, 'single_cluster_routing': {'cluster_id': 'cluster_id_value', 'allow_transactional_writes': True}, 'priority': 1, 'standard_isolation': {'priority': 1}} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_instance_admin.UpdateAppProfileRequest.meta.fields["app_profile"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["app_profile"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["app_profile"][field])): - del request_init["app_profile"][field][i][subfield] - else: - del 
request_init["app_profile"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_app_profile(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_update_app_profile_rest_required_fields(request_type=bigtable_instance_admin.UpdateAppProfileRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).update_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).update_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("ignore_warnings", "update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.update_app_profile(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_update_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.update_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == (set(("ignoreWarnings", "updateMask", )) & set(("appProfile", "updateMask", ))) - - 
-@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_update_app_profile") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_update_app_profile") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.UpdateAppProfileRequest.pb(bigtable_instance_admin.UpdateAppProfileRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_instance_admin.UpdateAppProfileRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.update_app_profile(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_app_profile_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.UpdateAppProfileRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy 
transcoding - request_init = {'app_profile': {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_app_profile(request) - - -def test_update_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'app_profile': {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'}} - - # get truthy value for each flattened field - mock_args = dict( - app_profile=instance.AppProfile(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.update_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{app_profile.name=projects/*/instances/*/appProfiles/*}" % client.transport._host, args[1]) - - -def test_update_app_profile_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_app_profile( - bigtable_instance_admin.UpdateAppProfileRequest(), - app_profile=instance.AppProfile(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_update_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.DeleteAppProfileRequest, - dict, -]) -def test_delete_app_profile_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_app_profile(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_app_profile_rest_required_fields(request_type=bigtable_instance_admin.DeleteAppProfileRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["name"] = "" - request_init["ignore_warnings"] = False - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - assert "ignoreWarnings" not in jsonified_request - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_app_profile._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "ignoreWarnings" in jsonified_request - assert jsonified_request["ignoreWarnings"] == request_init["ignore_warnings"] - - jsonified_request["name"] = 'name_value' - jsonified_request["ignoreWarnings"] = True - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_app_profile._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("ignore_warnings", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - assert "ignoreWarnings" in jsonified_request - assert jsonified_request["ignoreWarnings"] == True - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_app_profile(request) - - expected_params = [ - ( - "ignoreWarnings", - str(False).lower(), - ), - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_app_profile_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.delete_app_profile._get_unset_required_fields({}) - assert set(unset_fields) == (set(("ignoreWarnings", )) & set(("name", "ignoreWarnings", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_app_profile_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - 
mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_delete_app_profile") as pre: - pre.assert_not_called() - pb_message = bigtable_instance_admin.DeleteAppProfileRequest.pb(bigtable_instance_admin.DeleteAppProfileRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_instance_admin.DeleteAppProfileRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_app_profile(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - - -def test_delete_app_profile_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.DeleteAppProfileRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_app_profile(request) - - -def test_delete_app_profile_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/appProfiles/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.delete_app_profile(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/appProfiles/*}" % client.transport._host, args[1]) - - -def test_delete_app_profile_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.delete_app_profile( - bigtable_instance_admin.DeleteAppProfileRequest(), - name='name_value', - ) - - -def test_delete_app_profile_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.GetIamPolicyRequest, - dict, -]) -def test_get_iam_policy_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b'etag_blob', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_iam_policy(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -def test_get_iam_policy_rest_required_fields(request_type=iam_policy_pb2.GetIamPolicyRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = 'resource_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == 'resource_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_iam_policy(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.get_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_get_iam_policy") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_get_iam_policy") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.GetIamPolicyRequest() - 
transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) - - request = iam_policy_pb2.GetIamPolicyRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - - client.get_iam_policy(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.GetIamPolicyRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_iam_policy(request) - - -def test_get_iam_policy_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = {'resource': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - resource='resource_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{resource=projects/*/instances/*}:getIamPolicy" % client.transport._host, args[1]) - - -def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource='resource_value', - ) - - -def test_get_iam_policy_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.SetIamPolicyRequest, - dict, -]) -def test_set_iam_policy_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b'etag_blob', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.set_iam_policy(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -def test_set_iam_policy_rest_required_fields(request_type=iam_policy_pb2.SetIamPolicyRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = 'resource_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == 'resource_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - 
request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.set_iam_policy(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_set_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.set_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource", "policy", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_set_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with 
mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_set_iam_policy") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_set_iam_policy") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.SetIamPolicyRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) - - request = iam_policy_pb2.SetIamPolicyRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - - client.set_iam_policy(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.SetIamPolicyRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.set_iam_policy(request) - - -def test_set_iam_policy_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = {'resource': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - resource='resource_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.set_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{resource=projects/*/instances/*}:setIamPolicy" % client.transport._host, args[1]) - - -def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource='resource_value', - ) - - -def test_set_iam_policy_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, -]) -def test_test_iam_permissions_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=['permissions_value'], - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.test_iam_permissions(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ['permissions_value'] - - -def test_test_iam_permissions_rest_required_fields(request_type=iam_policy_pb2.TestIamPermissionsRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request_init["permissions"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = 'resource_value' - jsonified_request["permissions"] = 'permissions_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == 'resource_value' - assert "permissions" in jsonified_request - assert jsonified_request["permissions"] == 'permissions_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.test_iam_permissions(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_test_iam_permissions_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource", "permissions", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - 
mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_test_iam_permissions") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_test_iam_permissions") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(iam_policy_pb2.TestIamPermissionsResponse()) - - request = iam_policy_pb2.TestIamPermissionsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() - - client.test_iam_permissions(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.TestIamPermissionsRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.test_iam_permissions(request) - - -def test_test_iam_permissions_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'resource': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - resource='resource_value', - permissions=['permissions_value'], - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.test_iam_permissions(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{resource=projects/*/instances/*}:testIamPermissions" % client.transport._host, args[1]) - - -def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource='resource_value', - permissions=['permissions_value'], - ) - - -def test_test_iam_permissions_rest_error(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_instance_admin.ListHotTabletsRequest, - dict, -]) -def test_list_hot_tablets_rest(request_type): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_hot_tablets(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListHotTabletsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_hot_tablets_rest_required_fields(request_type=bigtable_instance_admin.ListHotTabletsRequest): - transport_class = transports.BigtableInstanceAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_hot_tablets._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_hot_tablets._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("end_time", "page_size", "page_token", "start_time", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_hot_tablets(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_hot_tablets_rest_unset_required_fields(): - transport = transports.BigtableInstanceAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.list_hot_tablets._get_unset_required_fields({}) - assert set(unset_fields) == (set(("endTime", "pageSize", "pageToken", "startTime", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_hot_tablets_rest_interceptors(null_interceptor): - transport = transports.BigtableInstanceAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), - ) - client = BigtableInstanceAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as 
req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "post_list_hot_tablets") as post, \ - mock.patch.object(transports.BigtableInstanceAdminRestInterceptor, "pre_list_hot_tablets") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_instance_admin.ListHotTabletsRequest.pb(bigtable_instance_admin.ListHotTabletsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_instance_admin.ListHotTabletsResponse.to_json(bigtable_instance_admin.ListHotTabletsResponse()) - - request = bigtable_instance_admin.ListHotTabletsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_instance_admin.ListHotTabletsResponse() - - client.list_hot_tablets(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_hot_tablets_rest_bad_request(transport: str = 'rest', request_type=bigtable_instance_admin.ListHotTabletsRequest): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_hot_tablets(request) - - -def test_list_hot_tablets_rest_flattened(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_instance_admin.ListHotTabletsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_instance_admin.ListHotTabletsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_hot_tablets(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/hotTablets" % client.transport._host, args[1]) - - -def test_list_hot_tablets_rest_flattened_error(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_hot_tablets( - bigtable_instance_admin.ListHotTabletsRequest(), - parent='parent_value', - ) - - -def test_list_hot_tablets_rest_pager(transport: str = 'rest'): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - instance.HotTablet(), - ], - next_page_token='abc', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[], - next_page_token='def', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - ], - next_page_token='ghi', - ), - bigtable_instance_admin.ListHotTabletsResponse( - hot_tablets=[ - instance.HotTablet(), - instance.HotTablet(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(bigtable_instance_admin.ListHotTabletsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - - pager = client.list_hot_tablets(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, instance.HotTablet) - for i in results) - - pages = list(client.list_hot_tablets(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. 
- transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide an api_key and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, - transport=transport, - ) - - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options=options, - credentials=_AnonymousCredentialsWithUniverseDomain() - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - with pytest.raises(ValueError): - client = BigtableInstanceAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. 
- transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - client = BigtableInstanceAdminClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - transports.BigtableInstanceAdminRestTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport_class() - adc.assert_called_once() - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "rest", -]) -def test_transport_kind(transport_name): - transport = BigtableInstanceAdminClient.get_transport_class(transport_name)( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - assert transport.kind == transport_name - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. 
- client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - assert isinstance( - client.transport, - transports.BigtableInstanceAdminGrpcTransport, - ) - -def test_bigtable_instance_admin_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.BigtableInstanceAdminTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - credentials_file="credentials.json" - ) - - -def test_bigtable_instance_admin_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.BigtableInstanceAdminTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_instance', - 'get_instance', - 'list_instances', - 'update_instance', - 'partial_update_instance', - 'delete_instance', - 'create_cluster', - 'get_cluster', - 'list_clusters', - 'update_cluster', - 'partial_update_cluster', - 'delete_cluster', - 'create_app_profile', - 'get_app_profile', - 'list_app_profiles', - 'update_app_profile', - 'delete_app_profile', - 'get_iam_policy', - 'set_iam_policy', - 'test_iam_permissions', - 'list_hot_tablets', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - # Catch all for all remaining methods and properties - remainder = [ - 'kind', - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() - - -def test_bigtable_instance_admin_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport = transports.BigtableInstanceAdminTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.cluster', - 'https://www.googleapis.com/auth/bigtable.admin.instance', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 
'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - quota_project_id="octopus", - ) - - -def test_bigtable_instance_admin_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport = transports.BigtableInstanceAdminTransport() - adc.assert_called_once() - - -def test_bigtable_instance_admin_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - BigtableInstanceAdminClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.cluster', - 'https://www.googleapis.com/auth/bigtable.admin.instance', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - ], -) -def test_bigtable_instance_admin_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/bigtable.admin', 'https://www.googleapis.com/auth/bigtable.admin.cluster', 'https://www.googleapis.com/auth/bigtable.admin.instance', 'https://www.googleapis.com/auth/cloud-bigtable.admin', 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/cloud-platform.read-only',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableInstanceAdminGrpcTransport, - transports.BigtableInstanceAdminGrpcAsyncIOTransport, - transports.BigtableInstanceAdminRestTransport, - ], -) -def test_bigtable_instance_admin_transport_auth_gdch_credentials(transport_class): - host = 'https://language.com' - api_audience_tests = [None, 'https://language2.com'] - api_audience_expect = [host, 'https://language2.com'] - for t, e in zip(api_audience_tests, api_audience_expect): - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - gdch_mock = mock.MagicMock() - type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) - adc.return_value = (gdch_mock, None) - transport_class(host=host, api_audience=t) - gdch_mock.with_gdch_audience.assert_called_once_with( - e - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableInstanceAdminGrpcTransport, grpc_helpers), - (transports.BigtableInstanceAdminGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_bigtable_instance_admin_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = _AnonymousCredentialsWithUniverseDomain() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.cluster', - 'https://www.googleapis.com/auth/bigtable.admin.instance', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.cluster', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - scopes=["1", "2"], - default_host="bigtableadmin.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.BigtableInstanceAdminGrpcTransport, transports.BigtableInstanceAdminGrpcAsyncIOTransport]) -def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = _AnonymousCredentialsWithUniverseDomain() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - -def test_bigtable_instance_admin_http_transport_client_cert_source_for_mtls(): - cred = _AnonymousCredentialsWithUniverseDomain() - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: - transports.BigtableInstanceAdminRestTransport ( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) - - -def test_bigtable_instance_admin_rest_lro_client(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_bigtable_instance_admin_host_no_port(transport_name): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_options=client_options.ClientOptions(api_endpoint='bigtableadmin.googleapis.com'), - transport=transport_name, - ) - assert client.transport._host == ( - 'bigtableadmin.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://bigtableadmin.googleapis.com' - ) - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_bigtable_instance_admin_host_with_port(transport_name): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_options=client_options.ClientOptions(api_endpoint='bigtableadmin.googleapis.com:8000'), - transport=transport_name, - ) - assert client.transport._host == ( - 'bigtableadmin.googleapis.com:8000' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://bigtableadmin.googleapis.com:8000' - ) - -@pytest.mark.parametrize("transport_name", [ - "rest", -]) -def test_bigtable_instance_admin_client_transport_session_collision(transport_name): - creds1 = _AnonymousCredentialsWithUniverseDomain() - creds2 = _AnonymousCredentialsWithUniverseDomain() - client1 = BigtableInstanceAdminClient( - credentials=creds1, - transport=transport_name, - ) - client2 = BigtableInstanceAdminClient( - credentials=creds2, - transport=transport_name, - ) - session1 = client1.transport.create_instance._session - session2 = client2.transport.create_instance._session - assert session1 != session2 - session1 = client1.transport.get_instance._session - session2 = client2.transport.get_instance._session - assert session1 != session2 - session1 = client1.transport.list_instances._session - session2 = 
client2.transport.list_instances._session - assert session1 != session2 - session1 = client1.transport.update_instance._session - session2 = client2.transport.update_instance._session - assert session1 != session2 - session1 = client1.transport.partial_update_instance._session - session2 = client2.transport.partial_update_instance._session - assert session1 != session2 - session1 = client1.transport.delete_instance._session - session2 = client2.transport.delete_instance._session - assert session1 != session2 - session1 = client1.transport.create_cluster._session - session2 = client2.transport.create_cluster._session - assert session1 != session2 - session1 = client1.transport.get_cluster._session - session2 = client2.transport.get_cluster._session - assert session1 != session2 - session1 = client1.transport.list_clusters._session - session2 = client2.transport.list_clusters._session - assert session1 != session2 - session1 = client1.transport.update_cluster._session - session2 = client2.transport.update_cluster._session - assert session1 != session2 - session1 = client1.transport.partial_update_cluster._session - session2 = client2.transport.partial_update_cluster._session - assert session1 != session2 - session1 = client1.transport.delete_cluster._session - session2 = client2.transport.delete_cluster._session - assert session1 != session2 - session1 = client1.transport.create_app_profile._session - session2 = client2.transport.create_app_profile._session - assert session1 != session2 - session1 = client1.transport.get_app_profile._session - session2 = client2.transport.get_app_profile._session - assert session1 != session2 - session1 = client1.transport.list_app_profiles._session - session2 = client2.transport.list_app_profiles._session - assert session1 != session2 - session1 = client1.transport.update_app_profile._session - session2 = client2.transport.update_app_profile._session - assert session1 != session2 - session1 = 
client1.transport.delete_app_profile._session - session2 = client2.transport.delete_app_profile._session - assert session1 != session2 - session1 = client1.transport.get_iam_policy._session - session2 = client2.transport.get_iam_policy._session - assert session1 != session2 - session1 = client1.transport.set_iam_policy._session - session2 = client2.transport.set_iam_policy._session - assert session1 != session2 - session1 = client1.transport.test_iam_permissions._session - session2 = client2.transport.test_iam_permissions._session - assert session1 != session2 - session1 = client1.transport.list_hot_tablets._session - session2 = client2.transport.list_hot_tablets._session - assert session1 != session2 -def test_bigtable_instance_admin_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.BigtableInstanceAdminGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_bigtable_instance_admin_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.BigtableInstanceAdminGrpcTransport, transports.BigtableInstanceAdminGrpcAsyncIOTransport]) -def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = _AnonymousCredentialsWithUniverseDomain() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. 
-@pytest.mark.parametrize("transport_class", [transports.BigtableInstanceAdminGrpcTransport, transports.BigtableInstanceAdminGrpcAsyncIOTransport]) -def test_bigtable_instance_admin_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_bigtable_instance_admin_grpc_lro_client(): - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_bigtable_instance_admin_grpc_lro_async_client(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. 
- assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_app_profile_path(): - project = "squid" - instance = "clam" - app_profile = "whelk" - expected = "projects/{project}/instances/{instance}/appProfiles/{app_profile}".format(project=project, instance=instance, app_profile=app_profile, ) - actual = BigtableInstanceAdminClient.app_profile_path(project, instance, app_profile) - assert expected == actual - - -def test_parse_app_profile_path(): - expected = { - "project": "octopus", - "instance": "oyster", - "app_profile": "nudibranch", - } - path = BigtableInstanceAdminClient.app_profile_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_app_profile_path(path) - assert expected == actual - -def test_cluster_path(): - project = "cuttlefish" - instance = "mussel" - cluster = "winkle" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format(project=project, instance=instance, cluster=cluster, ) - actual = BigtableInstanceAdminClient.cluster_path(project, instance, cluster) - assert expected == actual - - -def test_parse_cluster_path(): - expected = { - "project": "nautilus", - "instance": "scallop", - "cluster": "abalone", - } - path = BigtableInstanceAdminClient.cluster_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableInstanceAdminClient.parse_cluster_path(path) - assert expected == actual - -def test_crypto_key_path(): - project = "squid" - location = "clam" - key_ring = "whelk" - crypto_key = "octopus" - expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, ) - actual = BigtableInstanceAdminClient.crypto_key_path(project, location, key_ring, crypto_key) - assert expected == actual - - -def test_parse_crypto_key_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - "key_ring": "cuttlefish", - "crypto_key": "mussel", - } - path = BigtableInstanceAdminClient.crypto_key_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_crypto_key_path(path) - assert expected == actual - -def test_hot_tablet_path(): - project = "winkle" - instance = "nautilus" - cluster = "scallop" - hot_tablet = "abalone" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}/hotTablets/{hot_tablet}".format(project=project, instance=instance, cluster=cluster, hot_tablet=hot_tablet, ) - actual = BigtableInstanceAdminClient.hot_tablet_path(project, instance, cluster, hot_tablet) - assert expected == actual - - -def test_parse_hot_tablet_path(): - expected = { - "project": "squid", - "instance": "clam", - "cluster": "whelk", - "hot_tablet": "octopus", - } - path = BigtableInstanceAdminClient.hot_tablet_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableInstanceAdminClient.parse_hot_tablet_path(path) - assert expected == actual - -def test_instance_path(): - project = "oyster" - instance = "nudibranch" - expected = "projects/{project}/instances/{instance}".format(project=project, instance=instance, ) - actual = BigtableInstanceAdminClient.instance_path(project, instance) - assert expected == actual - - -def test_parse_instance_path(): - expected = { - "project": "cuttlefish", - "instance": "mussel", - } - path = BigtableInstanceAdminClient.instance_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_instance_path(path) - assert expected == actual - -def test_table_path(): - project = "winkle" - instance = "nautilus" - table = "scallop" - expected = "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) - actual = BigtableInstanceAdminClient.table_path(project, instance, table) - assert expected == actual - - -def test_parse_table_path(): - expected = { - "project": "abalone", - "instance": "squid", - "table": "clam", - } - path = BigtableInstanceAdminClient.table_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_table_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "whelk" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = BigtableInstanceAdminClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "octopus", - } - path = BigtableInstanceAdminClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableInstanceAdminClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "oyster" - expected = "folders/{folder}".format(folder=folder, ) - actual = BigtableInstanceAdminClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nudibranch", - } - path = BigtableInstanceAdminClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "cuttlefish" - expected = "organizations/{organization}".format(organization=organization, ) - actual = BigtableInstanceAdminClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "mussel", - } - path = BigtableInstanceAdminClient.common_organization_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "winkle" - expected = "projects/{project}".format(project=project, ) - actual = BigtableInstanceAdminClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "nautilus", - } - path = BigtableInstanceAdminClient.common_project_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableInstanceAdminClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "scallop" - location = "abalone" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = BigtableInstanceAdminClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "squid", - "location": "clam", - } - path = BigtableInstanceAdminClient.common_location_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableInstanceAdminClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_with_default_client_info(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.BigtableInstanceAdminTransport, '_prep_wrapped_messages') as prep: - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.BigtableInstanceAdminTransport, '_prep_wrapped_messages') as prep: - transport_class = BigtableInstanceAdminClient.get_transport_class() - transport = transport_class( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = BigtableInstanceAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - - -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = BigtableInstanceAdminClient( - 
credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'rest', - 'grpc', - ] - for transport in transports: - client = BigtableInstanceAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport - ) - # Test client calls underlying transport. - with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() - -@pytest.mark.parametrize("client_class,transport_class", [ - (BigtableInstanceAdminClient, transports.BigtableInstanceAdminGrpcTransport), - (BigtableInstanceAdminAsyncClient, transports.BigtableInstanceAdminGrpcAsyncIOTransport), -]) -def test_api_key_credentials(client_class, transport_class): - with mock.patch.object( - google.auth._default, "get_api_key_credentials", create=True - ) as get_api_key_credentials: - mock_cred = mock.Mock() - get_api_key_credentials.return_value = mock_cred - options = client_options.ClientOptions() - options.api_key = "api_key" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=mock_cred, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) diff --git a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py deleted file mode 100644 index 37e84aa3a..000000000 --- 
a/owl-bot-staging/bigtable_admin/v2/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ /dev/null @@ -1,14356 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright 2023 Google LLC -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -import os -# try/except added for compatibility with python < 3.8 -try: - from unittest import mock - from unittest.mock import AsyncMock # pragma: NO COVER -except ImportError: # pragma: NO COVER - import mock - -import grpc -from grpc.experimental import aio -from collections.abc import Iterable -from google.protobuf import json_format -import json -import math -import pytest -from google.api_core import api_core_version -from proto.marshal.rules.dates import DurationRule, TimestampRule -from proto.marshal.rules import wrappers -from requests import Response -from requests import Request, PreparedRequest -from requests.sessions import Session -from google.protobuf import json_format - -from google.api_core import client_options -from google.api_core import exceptions as core_exceptions -from google.api_core import future -from google.api_core import gapic_v1 -from google.api_core import grpc_helpers -from google.api_core import grpc_helpers_async -from google.api_core import operation -from google.api_core import operation_async # type: ignore -from google.api_core import operations_v1 -from google.api_core import path_template -from google.auth import credentials as ga_credentials -from google.auth.exceptions import 
MutualTLSChannelError -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import BigtableTableAdminAsyncClient -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import BigtableTableAdminClient -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import pagers -from google.cloud.bigtable_admin_v2.services.bigtable_table_admin import transports -from google.cloud.bigtable_admin_v2.types import bigtable_table_admin -from google.cloud.bigtable_admin_v2.types import table -from google.cloud.bigtable_admin_v2.types import table as gba_table -from google.iam.v1 import iam_policy_pb2 # type: ignore -from google.iam.v1 import options_pb2 # type: ignore -from google.iam.v1 import policy_pb2 # type: ignore -from google.longrunning import operations_pb2 # type: ignore -from google.oauth2 import service_account -from google.protobuf import any_pb2 # type: ignore -from google.protobuf import duration_pb2 # type: ignore -from google.protobuf import field_mask_pb2 # type: ignore -from google.protobuf import timestamp_pb2 # type: ignore -from google.rpc import status_pb2 # type: ignore -from google.type import expr_pb2 # type: ignore -import google.auth - - -def client_cert_source_callback(): - return b"cert bytes", b"key bytes" - -# If default endpoint is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint so the client can produce a different -# mtls endpoint for endpoint testing purposes. -def modify_default_endpoint(client): - return "foo.googleapis.com" if ("localhost" in client.DEFAULT_ENDPOINT) else client.DEFAULT_ENDPOINT - -# If default endpoint template is localhost, then default mtls endpoint will be the same. -# This method modifies the default endpoint template so the client can produce a different -# mtls endpoint for endpoint testing purposes. 
-def modify_default_endpoint_template(client): - return "test.{UNIVERSE_DOMAIN}" if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) else client._DEFAULT_ENDPOINT_TEMPLATE - -# Anonymous Credentials with universe domain property. If no universe domain is provided, then -# the default universe domain is "googleapis.com". -class _AnonymousCredentialsWithUniverseDomain(ga_credentials.AnonymousCredentials): - def __init__(self, universe_domain="googleapis.com"): - super(_AnonymousCredentialsWithUniverseDomain, self).__init__() - self._universe_domain = universe_domain - - @property - def universe_domain(self): - return self._universe_domain - -def test__get_default_mtls_endpoint(): - api_endpoint = "example.googleapis.com" - api_mtls_endpoint = "example.mtls.googleapis.com" - sandbox_endpoint = "example.sandbox.googleapis.com" - sandbox_mtls_endpoint = "example.mtls.sandbox.googleapis.com" - non_googleapi = "api.example.com" - - assert BigtableTableAdminClient._get_default_mtls_endpoint(None) is None - assert BigtableTableAdminClient._get_default_mtls_endpoint(api_endpoint) == api_mtls_endpoint - assert BigtableTableAdminClient._get_default_mtls_endpoint(api_mtls_endpoint) == api_mtls_endpoint - assert BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_endpoint) == sandbox_mtls_endpoint - assert BigtableTableAdminClient._get_default_mtls_endpoint(sandbox_mtls_endpoint) == sandbox_mtls_endpoint - assert BigtableTableAdminClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi - -def test__read_environment_variables(): - assert BigtableTableAdminClient._read_environment_variables() == (False, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - assert BigtableTableAdminClient._read_environment_variables() == (True, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - assert BigtableTableAdminClient._read_environment_variables() == (False, "auto", None) - 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError) as excinfo: - BigtableTableAdminClient._read_environment_variables() - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - assert BigtableTableAdminClient._read_environment_variables() == (False, "never", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - assert BigtableTableAdminClient._read_environment_variables() == (False, "always", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): - assert BigtableTableAdminClient._read_environment_variables() == (False, "auto", None) - - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - BigtableTableAdminClient._read_environment_variables() - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - - with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): - assert BigtableTableAdminClient._read_environment_variables() == (False, "auto", "foo.com") - -def test__get_client_cert_source(): - mock_provided_cert_source = mock.Mock() - mock_default_cert_source = mock.Mock() - - assert BigtableTableAdminClient._get_client_cert_source(None, False) is None - assert BigtableTableAdminClient._get_client_cert_source(mock_provided_cert_source, False) is None - assert BigtableTableAdminClient._get_client_cert_source(mock_provided_cert_source, True) == mock_provided_cert_source - - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_default_cert_source): - assert 
BigtableTableAdminClient._get_client_cert_source(None, True) is mock_default_cert_source - assert BigtableTableAdminClient._get_client_cert_source(mock_provided_cert_source, "true") is mock_provided_cert_source - -@mock.patch.object(BigtableTableAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableTableAdminClient)) -@mock.patch.object(BigtableTableAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableTableAdminAsyncClient)) -def test__get_api_endpoint(): - api_override = "foo.com" - mock_client_cert_source = mock.Mock() - default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE - default_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=default_universe) - mock_universe = "bar.com" - mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=mock_universe) - - assert BigtableTableAdminClient._get_api_endpoint(api_override, mock_client_cert_source, default_universe, "always") == api_override - assert BigtableTableAdminClient._get_api_endpoint(None, mock_client_cert_source, default_universe, "auto") == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - assert BigtableTableAdminClient._get_api_endpoint(None, None, default_universe, "auto") == default_endpoint - assert BigtableTableAdminClient._get_api_endpoint(None, None, default_universe, "always") == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - assert BigtableTableAdminClient._get_api_endpoint(None, mock_client_cert_source, default_universe, "always") == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT - assert BigtableTableAdminClient._get_api_endpoint(None, None, mock_universe, "never") == mock_endpoint - assert BigtableTableAdminClient._get_api_endpoint(None, None, default_universe, "never") == default_endpoint - - with pytest.raises(MutualTLSChannelError) as excinfo: - BigtableTableAdminClient._get_api_endpoint(None, mock_client_cert_source, mock_universe, "auto") - assert 
str(excinfo.value) == "mTLS is not supported in any universe other than googleapis.com." - -def test__get_universe_domain(): - client_universe_domain = "foo.com" - universe_domain_env = "bar.com" - - assert BigtableTableAdminClient._get_universe_domain(client_universe_domain, universe_domain_env) == client_universe_domain - assert BigtableTableAdminClient._get_universe_domain(None, universe_domain_env) == universe_domain_env - assert BigtableTableAdminClient._get_universe_domain(None, None) == BigtableTableAdminClient._DEFAULT_UNIVERSE - - with pytest.raises(ValueError) as excinfo: - BigtableTableAdminClient._get_universe_domain("", None) - assert str(excinfo.value) == "Universe Domain cannot be an empty string." - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), -]) -def test__validate_universe_domain(client_class, transport_class, transport_name): - client = client_class( - transport=transport_class( - credentials=_AnonymousCredentialsWithUniverseDomain() - ) - ) - assert client._validate_universe_domain() == True - - # Test the case when universe is already validated. - assert client._validate_universe_domain() == True - - if transport_name == "grpc": - # Test the case where credentials are provided by the - # `local_channel_credentials`. The default universes in both match. - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - client = client_class(transport=transport_class(channel=channel)) - assert client._validate_universe_domain() == True - - # Test the case where credentials do not exist: e.g. a transport is provided - # with no credentials. Validation should still succeed because there is no - # mismatch with non-existent credentials. 
- channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - transport=transport_class(channel=channel) - transport._credentials = None - client = client_class(transport=transport) - assert client._validate_universe_domain() == True - - # Test the case when there is a universe mismatch from the credentials. - client = client_class( - transport=transport_class(credentials=_AnonymousCredentialsWithUniverseDomain(universe_domain="foo.com")) - ) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert str(excinfo.value) == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." - - # Test the case when there is a universe mismatch from the client. - # - # TODO: Make this test unconditional once the minimum supported version of - # google-api-core becomes 2.15.0 or higher. - api_core_major, api_core_minor, _ = [int(part) for part in api_core_version.__version__.split(".")] - if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): - client = client_class(client_options={"universe_domain": "bar.com"}, transport=transport_class(credentials=_AnonymousCredentialsWithUniverseDomain(),)) - with pytest.raises(ValueError) as excinfo: - client._validate_universe_domain() - assert str(excinfo.value) == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
- -@pytest.mark.parametrize("client_class,transport_name", [ - (BigtableTableAdminClient, "grpc"), - (BigtableTableAdminAsyncClient, "grpc_asyncio"), - (BigtableTableAdminClient, "rest"), -]) -def test_bigtable_table_admin_client_from_service_account_info(client_class, transport_name): - creds = _AnonymousCredentialsWithUniverseDomain() - with mock.patch.object(service_account.Credentials, 'from_service_account_info') as factory: - factory.return_value = creds - info = {"valid": True} - client = client_class.from_service_account_info(info, transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'bigtableadmin.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://bigtableadmin.googleapis.com' - ) - - -@pytest.mark.parametrize("transport_class,transport_name", [ - (transports.BigtableTableAdminGrpcTransport, "grpc"), - (transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), - (transports.BigtableTableAdminRestTransport, "rest"), -]) -def test_bigtable_table_admin_client_service_account_always_use_jwt(transport_class, transport_name): - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=True) - use_jwt.assert_called_once_with(True) - - with mock.patch.object(service_account.Credentials, 'with_always_use_jwt_access', create=True) as use_jwt: - creds = service_account.Credentials(None, None, None) - transport = transport_class(credentials=creds, always_use_jwt_access=False) - use_jwt.assert_not_called() - - -@pytest.mark.parametrize("client_class,transport_name", [ - (BigtableTableAdminClient, "grpc"), - (BigtableTableAdminAsyncClient, "grpc_asyncio"), - (BigtableTableAdminClient, "rest"), -]) -def 
test_bigtable_table_admin_client_from_service_account_file(client_class, transport_name): - creds = _AnonymousCredentialsWithUniverseDomain() - with mock.patch.object(service_account.Credentials, 'from_service_account_file') as factory: - factory.return_value = creds - client = client_class.from_service_account_file("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - client = client_class.from_service_account_json("dummy/file/path.json", transport=transport_name) - assert client.transport._credentials == creds - assert isinstance(client, client_class) - - assert client.transport._host == ( - 'bigtableadmin.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else - 'https://bigtableadmin.googleapis.com' - ) - - -def test_bigtable_table_admin_client_get_transport_class(): - transport = BigtableTableAdminClient.get_transport_class() - available_transports = [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminRestTransport, - ] - assert transport in available_transports - - transport = BigtableTableAdminClient.get_transport_class("grpc") - assert transport == transports.BigtableTableAdminGrpcTransport - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), - (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), -]) -@mock.patch.object(BigtableTableAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableTableAdminClient)) -@mock.patch.object(BigtableTableAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableTableAdminAsyncClient)) -def test_bigtable_table_admin_client_client_options(client_class, transport_class, transport_name): - # Check that 
if channel is provided we won't create a new one. - with mock.patch.object(BigtableTableAdminClient, 'get_transport_class') as gtc: - transport = transport_class( - credentials=_AnonymousCredentialsWithUniverseDomain() - ) - client = client_class(transport=transport) - gtc.assert_not_called() - - # Check that if channel is provided via str we will create a new one. - with mock.patch.object(BigtableTableAdminClient, 'get_transport_class') as gtc: - client = client_class(transport=transport_name) - gtc.assert_called() - - # Check the case api_endpoint is provided. - options = client_options.ClientOptions(api_endpoint="squid.clam.whelk") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name, client_options=options) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is - # "always". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client.DEFAULT_MTLS_ENDPOINT, - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client = client_class(transport=transport_name) - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError) as excinfo: - client = client_class(transport=transport_name) - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - - # Check the case quota_project_id is provided - options = client_options.ClientOptions(quota_project_id="octopus") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id="octopus", - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - # Check the case api_endpoint is provided - options = client_options.ClientOptions(api_audience="https://language.googleapis.com") - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience="https://language.googleapis.com" - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,use_client_cert_env", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", "true"), - (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", "true"), - (BigtableTableAdminClient, 
transports.BigtableTableAdminGrpcTransport, "grpc", "false"), - (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", "false"), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", "true"), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", "false"), -]) -@mock.patch.object(BigtableTableAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableTableAdminClient)) -@mock.patch.object(BigtableTableAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableTableAdminAsyncClient)) -@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) -def test_bigtable_table_admin_client_mtls_env_auto(client_class, transport_class, transport_name, use_client_cert_env): - # This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default - # mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists. - - # Check the case client_cert_source is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - options = client_options.ClientOptions(client_cert_source=client_cert_source_callback) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - - if use_client_cert_env == "false": - expected_client_cert_source = None - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE) - else: - expected_client_cert_source = client_cert_source_callback - expected_host = client.DEFAULT_MTLS_ENDPOINT - - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case ADC client cert is provided. Whether client cert is used depends on - # GOOGLE_API_USE_CLIENT_CERTIFICATE value. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=client_cert_source_callback): - if use_client_cert_env == "false": - expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE) - expected_client_cert_source = None - else: - expected_host = client.DEFAULT_MTLS_ENDPOINT - expected_client_cert_source = client_cert_source_callback - - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=expected_host, - scopes=None, - client_cert_source_for_mtls=expected_client_cert_source, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # Check the case client_cert_source and ADC client cert are not provided. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}): - with mock.patch.object(transport_class, '__init__') as patched: - with mock.patch("google.auth.transport.mtls.has_default_client_cert_source", return_value=False): - patched.return_value = None - client = client_class(transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class", [ - BigtableTableAdminClient, BigtableTableAdminAsyncClient -]) -@mock.patch.object(BigtableTableAdminClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableTableAdminClient)) -@mock.patch.object(BigtableTableAdminAsyncClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableTableAdminAsyncClient)) -def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_class): - mock_client_cert_source = mock.Mock() - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source == mock_client_cert_source - - # Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false". 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): - mock_client_cert_source = mock.Mock() - mock_api_endpoint = "foo" - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint) - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(options) - assert api_endpoint == mock_api_endpoint - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always". - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=False): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_ENDPOINT - assert cert_source is None - - # Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists. 
- with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch('google.auth.transport.mtls.has_default_client_cert_source', return_value=True): - with mock.patch('google.auth.transport.mtls.default_client_cert_source', return_value=mock_client_cert_source): - api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source() - assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - assert cert_source == mock_client_cert_source - - # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has - # unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" - - # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}): - with pytest.raises(ValueError) as excinfo: - client_class.get_mtls_endpoint_and_cert_source() - - assert str(excinfo.value) == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" - -@pytest.mark.parametrize("client_class", [ - BigtableTableAdminClient, BigtableTableAdminAsyncClient -]) -@mock.patch.object(BigtableTableAdminClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableTableAdminClient)) -@mock.patch.object(BigtableTableAdminAsyncClient, "_DEFAULT_ENDPOINT_TEMPLATE", modify_default_endpoint_template(BigtableTableAdminAsyncClient)) -def test_bigtable_table_admin_client_client_api_endpoint(client_class): - mock_client_cert_source = client_cert_source_callback - api_override = "foo.com" - default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE - default_endpoint = 
BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=default_universe) - mock_universe = "bar.com" - mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=mock_universe) - - # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", - # use ClientOptions.api_endpoint as the api endpoint regardless. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel"): - options = client_options.ClientOptions(client_cert_source=mock_client_cert_source, api_endpoint=api_override) - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == api_override - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == default_endpoint - - # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", - # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): - client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT - - # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), - # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, - # and ClientOptions.universe_domain="bar.com", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
- options = client_options.ClientOptions() - universe_exists = hasattr(options, "universe_domain") - if universe_exists: - options = client_options.ClientOptions(universe_domain=mock_universe) - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - else: - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == (mock_endpoint if universe_exists else default_endpoint) - assert client.universe_domain == (mock_universe if universe_exists else default_universe) - - # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", - # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. - options = client_options.ClientOptions() - if hasattr(options, "universe_domain"): - delattr(options, "universe_domain") - with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): - client = client_class(client_options=options, credentials=_AnonymousCredentialsWithUniverseDomain()) - assert client.api_endpoint == default_endpoint - - -@pytest.mark.parametrize("client_class,transport_class,transport_name", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), - (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio"), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), -]) -def test_bigtable_table_admin_client_client_options_scopes(client_class, transport_class, transport_name): - # Check the case scopes are provided. 
- options = client_options.ClientOptions( - scopes=["1", "2"], - ) - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=["1", "2"], - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", grpc_helpers), - (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), - (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest", None), -]) -def test_bigtable_table_admin_client_client_options_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - -def test_bigtable_table_admin_client_client_options_from_dict(): - with mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminGrpcTransport.__init__') as grpc_transport: - grpc_transport.return_value = None - client = BigtableTableAdminClient( - client_options={'api_endpoint': 'squid.clam.whelk'} - ) - grpc_transport.assert_called_once_with( - credentials=None, - credentials_file=None, - host="squid.clam.whelk", - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - -@pytest.mark.parametrize("client_class,transport_class,transport_name,grpc_helpers", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc", grpc_helpers), - (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport, "grpc_asyncio", grpc_helpers_async), -]) -def test_bigtable_table_admin_client_create_channel_credentials_file(client_class, transport_class, transport_name, grpc_helpers): - # Check the case credentials file is provided. 
- options = client_options.ClientOptions( - credentials_file="credentials.json" - ) - - with mock.patch.object(transport_class, '__init__') as patched: - patched.return_value = None - client = client_class(client_options=options, transport=transport_name) - patched.assert_called_once_with( - credentials=None, - credentials_file="credentials.json", - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) - - # test that the credentials from file are saved and used as the credentials. - with mock.patch.object( - google.auth, "load_credentials_from_file", autospec=True - ) as load_creds, mock.patch.object( - google.auth, "default", autospec=True - ) as adc, mock.patch.object( - grpc_helpers, "create_channel" - ) as create_channel: - creds = _AnonymousCredentialsWithUniverseDomain() - file_creds = _AnonymousCredentialsWithUniverseDomain() - load_creds.return_value = (file_creds, None) - adc.return_value = (creds, None) - client = client_class(client_options=options, transport=transport_name) - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=file_creds, - credentials_file=None, - quota_project_id=None, - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - scopes=None, - default_host="bigtableadmin.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("request_type", [ - 
bigtable_table_admin.CreateTableRequest, - dict, -]) -def test_create_table(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gba_table.Table( - name='name_value', - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - response = client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gba_table.Table) - assert response.name == 'name_value' - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test_create_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_table), - '__call__') as call: - client.create_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() - -@pytest.mark.asyncio -async def test_create_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CreateTableRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table( - name='name_value', - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - )) - response = await client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, gba_table.Table) - assert response.name == 'name_value' - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -@pytest.mark.asyncio -async def test_create_table_async_from_dict(): - await test_create_table_async(request_type=dict) - - -def test_create_table_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.CreateTableRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table), - '__call__') as call: - call.return_value = gba_table.Table() - client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateTableRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table()) - await client.create_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_table_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = gba_table.Table() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_table( - parent='parent_value', - table_id='table_id_value', - table=gba_table.Table(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].table_id - mock_val = 'table_id_value' - assert arg == mock_val - arg = args[0].table - mock_val = gba_table.Table(name='name_value') - assert arg == mock_val - - -def test_create_table_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_table( - bigtable_table_admin.CreateTableRequest(), - parent='parent_value', - table_id='table_id_value', - table=gba_table.Table(name='name_value'), - ) - -@pytest.mark.asyncio -async def test_create_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = gba_table.Table() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(gba_table.Table()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.create_table( - parent='parent_value', - table_id='table_id_value', - table=gba_table.Table(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].table_id - mock_val = 'table_id_value' - assert arg == mock_val - arg = args[0].table - mock_val = gba_table.Table(name='name_value') - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_table( - bigtable_table_admin.CreateTableRequest(), - parent='parent_value', - table_id='table_id_value', - table=gba_table.Table(name='name_value'), - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.CreateTableFromSnapshotRequest, - dict, -]) -def test_create_table_from_snapshot(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_table_from_snapshot_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), - '__call__') as call: - client.create_table_from_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CreateTableFromSnapshotRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateTableFromSnapshotRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_async_from_dict(): - await test_create_table_from_snapshot_async(request_type=dict) - - -def test_create_table_from_snapshot_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateTableFromSnapshotRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateTableFromSnapshotRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_table_from_snapshot), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_table_from_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_table_from_snapshot_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_table_from_snapshot( - parent='parent_value', - table_id='table_id_value', - source_snapshot='source_snapshot_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].table_id - mock_val = 'table_id_value' - assert arg == mock_val - arg = args[0].source_snapshot - mock_val = 'source_snapshot_value' - assert arg == mock_val - - -def test_create_table_from_snapshot_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_table_from_snapshot( - bigtable_table_admin.CreateTableFromSnapshotRequest(), - parent='parent_value', - table_id='table_id_value', - source_snapshot='source_snapshot_value', - ) - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_table_from_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_table_from_snapshot( - parent='parent_value', - table_id='table_id_value', - source_snapshot='source_snapshot_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].table_id - mock_val = 'table_id_value' - assert arg == mock_val - arg = args[0].source_snapshot - mock_val = 'source_snapshot_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_table_from_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.create_table_from_snapshot( - bigtable_table_admin.CreateTableFromSnapshotRequest(), - parent='parent_value', - table_id='table_id_value', - source_snapshot='source_snapshot_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.ListTablesRequest, - dict, -]) -def test_list_tables(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListTablesResponse( - next_page_token='next_page_token_value', - ) - response = client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTablesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tables_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tables), - '__call__') as call: - client.list_tables() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() - -@pytest.mark.asyncio -async def test_list_tables_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.ListTablesRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListTablesResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListTablesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTablesAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_tables_async_from_dict(): - await test_list_tables_async(request_type=dict) - - -def test_list_tables_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListTablesRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tables), - '__call__') as call: - call.return_value = bigtable_table_admin.ListTablesResponse() - client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_tables_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListTablesRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListTablesResponse()) - await client.list_tables(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_tables_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = bigtable_table_admin.ListTablesResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_tables( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_tables_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tables( - bigtable_table_admin.ListTablesRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_tables_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListTablesResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListTablesResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_tables( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_tables_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_tables( - bigtable_table_admin.ListTablesRequest(), - parent='parent_value', - ) - - -def test_list_tables_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token='def', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_tables(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Table) - for i in results) -def test_list_tables_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_tables), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token='def', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - RuntimeError, - ) - pages = list(client.list_tables(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_tables_async_pager(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token='def', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_tables(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table.Table) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_tables_async_pages(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_tables), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token='def', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_tables(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.GetTableRequest, - dict, -]) -def test_get_table(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table( - name='name_value', - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - response = client.get_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == 'name_value' - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test_get_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table), - '__call__') as call: - client.get_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() - -@pytest.mark.asyncio -async def test_get_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.GetTableRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(table.Table( - name='name_value', - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - )) - response = await client.get_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetTableRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == 'name_value' - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -@pytest.mark.asyncio -async def test_get_table_async_from_dict(): - await test_get_table_async(request_type=dict) - - -def test_get_table_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetTableRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table), - '__call__') as call: - call.return_value = table.Table() - client.get_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetTableRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - await client.get_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_table_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_table( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_table_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_table( - bigtable_table_admin.GetTableRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_table), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = table.Table() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_table( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_table( - bigtable_table_admin.GetTableRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.UpdateTableRequest, - dict, -]) -def test_update_table(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_update_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table), - '__call__') as call: - client.update_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() - -@pytest.mark.asyncio -async def test_update_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.UpdateTableRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateTableRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_update_table_async_from_dict(): - await test_update_table_async(request_type=dict) - - -def test_update_table_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateTableRequest() - - request.table.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.update_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'table.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateTableRequest() - - request.table.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.update_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'table.name=name_value', - ) in kw['metadata'] - - -def test_update_table_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_table( - table=gba_table.Table(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].table - mock_val = gba_table.Table(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_table_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - -@pytest.mark.asyncio -async def test_update_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_table( - table=gba_table.Table(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].table - mock_val = gba_table.Table(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_update_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.DeleteTableRequest, - dict, -]) -def test_delete_table(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_table), - '__call__') as call: - client.delete_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() - -@pytest.mark.asyncio -async def test_delete_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.DeleteTableRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteTableRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_table_async_from_dict(): - await test_delete_table_async(request_type=dict) - - -def test_delete_table_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteTableRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_table), - '__call__') as call: - call.return_value = None - client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteTableRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_table), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_table_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.delete_table( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_table_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_table( - bigtable_table_admin.DeleteTableRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_table( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_table( - bigtable_table_admin.DeleteTableRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.UndeleteTableRequest, - dict, -]) -def test_undelete_table(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undelete_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_undelete_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.undelete_table), - '__call__') as call: - client.undelete_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest() - -@pytest.mark.asyncio -async def test_undelete_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.UndeleteTableRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undelete_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UndeleteTableRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_undelete_table_async_from_dict(): - await test_undelete_table_async(request_type=dict) - - -def test_undelete_table_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UndeleteTableRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.undelete_table), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_undelete_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UndeleteTableRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undelete_table), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.undelete_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_undelete_table_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undelete_table), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.undelete_table( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_undelete_table_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_undelete_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.undelete_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.undelete_table( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_undelete_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.ModifyColumnFamiliesRequest, - dict, -]) -def test_modify_column_families(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table( - name='name_value', - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - response = client.modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == 'name_value' - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test_modify_column_families_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), - '__call__') as call: - client.modify_column_families() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() - -@pytest.mark.asyncio -async def test_modify_column_families_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.ModifyColumnFamiliesRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(table.Table( - name='name_value', - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - )) - response = await client.modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ModifyColumnFamiliesRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Table) - assert response.name == 'name_value' - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -@pytest.mark.asyncio -async def test_modify_column_families_async_from_dict(): - await test_modify_column_families_async(request_type=dict) - - -def test_modify_column_families_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ModifyColumnFamiliesRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), - '__call__') as call: - call.return_value = table.Table() - client.modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_modify_column_families_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ModifyColumnFamiliesRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.modify_column_families), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - await client.modify_column_families(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_modify_column_families_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.modify_column_families( - name='name_value', - modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].modifications - mock_val = [bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')] - assert arg == mock_val - - -def test_modify_column_families_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name='name_value', - modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')], - ) - -@pytest.mark.asyncio -async def test_modify_column_families_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.modify_column_families), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Table() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Table()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.modify_column_families( - name='name_value', - modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')], - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].modifications - mock_val = [bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')] - assert arg == mock_val - -@pytest.mark.asyncio -async def test_modify_column_families_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name='name_value', - modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')], - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.DropRowRangeRequest, - dict, -]) -def test_drop_row_range(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.drop_row_range), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_drop_row_range_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.drop_row_range), - '__call__') as call: - client.drop_row_range() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() - -@pytest.mark.asyncio -async def test_drop_row_range_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.DropRowRangeRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.drop_row_range), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DropRowRangeRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_drop_row_range_async_from_dict(): - await test_drop_row_range_async(request_type=dict) - - -def test_drop_row_range_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DropRowRangeRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.drop_row_range), - '__call__') as call: - call.return_value = None - client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_drop_row_range_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DropRowRangeRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.drop_row_range), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.drop_row_range(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.GenerateConsistencyTokenRequest, - dict, -]) -def test_generate_consistency_token(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token='consistency_token_value', - ) - response = client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == 'consistency_token_value' - - -def test_generate_consistency_token_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), - '__call__') as call: - client.generate_consistency_token() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() - -@pytest.mark.asyncio -async def test_generate_consistency_token_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.GenerateConsistencyTokenRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token='consistency_token_value', - )) - response = await client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GenerateConsistencyTokenRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == 'consistency_token_value' - - -@pytest.mark.asyncio -async def test_generate_consistency_token_async_from_dict(): - await test_generate_consistency_token_async(request_type=dict) - - -def test_generate_consistency_token_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GenerateConsistencyTokenRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), - '__call__') as call: - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_generate_consistency_token_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GenerateConsistencyTokenRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.GenerateConsistencyTokenResponse()) - await client.generate_consistency_token(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_generate_consistency_token_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.generate_consistency_token( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_generate_consistency_token_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_generate_consistency_token_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.generate_consistency_token), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.GenerateConsistencyTokenResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.generate_consistency_token( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_generate_consistency_token_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.CheckConsistencyRequest, - dict, -]) -def test_check_consistency(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) - response = client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True - - -def test_check_consistency_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.check_consistency), - '__call__') as call: - client.check_consistency() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() - -@pytest.mark.asyncio -async def test_check_consistency_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CheckConsistencyRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - )) - response = await client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CheckConsistencyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True - - -@pytest.mark.asyncio -async def test_check_consistency_async_from_dict(): - await test_check_consistency_async(request_type=dict) - - -def test_check_consistency_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.CheckConsistencyRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), - '__call__') as call: - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_check_consistency_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CheckConsistencyRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.CheckConsistencyResponse()) - await client.check_consistency(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_check_consistency_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.check_consistency), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.check_consistency( - name='name_value', - consistency_token='consistency_token_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].consistency_token - mock_val = 'consistency_token_value' - assert arg == mock_val - - -def test_check_consistency_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), - name='name_value', - consistency_token='consistency_token_value', - ) - -@pytest.mark.asyncio -async def test_check_consistency_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.check_consistency), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.CheckConsistencyResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.CheckConsistencyResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.check_consistency( - name='name_value', - consistency_token='consistency_token_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].consistency_token - mock_val = 'consistency_token_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_check_consistency_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), - name='name_value', - consistency_token='consistency_token_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.SnapshotTableRequest, - dict, -]) -def test_snapshot_table(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.snapshot_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_snapshot_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.snapshot_table), - '__call__') as call: - client.snapshot_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() - -@pytest.mark.asyncio -async def test_snapshot_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.SnapshotTableRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.snapshot_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.SnapshotTableRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_snapshot_table_async_from_dict(): - await test_snapshot_table_async(request_type=dict) - - -def test_snapshot_table_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.SnapshotTableRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.snapshot_table), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_snapshot_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.SnapshotTableRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.snapshot_table), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.snapshot_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_snapshot_table_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.snapshot_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.snapshot_table( - name='name_value', - cluster='cluster_value', - snapshot_id='snapshot_id_value', - description='description_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].cluster - mock_val = 'cluster_value' - assert arg == mock_val - arg = args[0].snapshot_id - mock_val = 'snapshot_id_value' - assert arg == mock_val - arg = args[0].description - mock_val = 'description_value' - assert arg == mock_val - - -def test_snapshot_table_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name='name_value', - cluster='cluster_value', - snapshot_id='snapshot_id_value', - description='description_value', - ) - -@pytest.mark.asyncio -async def test_snapshot_table_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.snapshot_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.snapshot_table( - name='name_value', - cluster='cluster_value', - snapshot_id='snapshot_id_value', - description='description_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - arg = args[0].cluster - mock_val = 'cluster_value' - assert arg == mock_val - arg = args[0].snapshot_id - mock_val = 'snapshot_id_value' - assert arg == mock_val - arg = args[0].description - mock_val = 'description_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_snapshot_table_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name='name_value', - cluster='cluster_value', - snapshot_id='snapshot_id_value', - description='description_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.GetSnapshotRequest, - dict, -]) -def test_get_snapshot(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Snapshot( - name='name_value', - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description='description_value', - ) - response = client.get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Snapshot) - assert response.name == 'name_value' - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == 'description_value' - - -def test_get_snapshot_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_snapshot), - '__call__') as call: - client.get_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() - -@pytest.mark.asyncio -async def test_get_snapshot_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.GetSnapshotRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot( - name='name_value', - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description='description_value', - )) - response = await client.get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetSnapshotRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Snapshot) - assert response.name == 'name_value' - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == 'description_value' - - -@pytest.mark.asyncio -async def test_get_snapshot_async_from_dict(): - await test_get_snapshot_async(request_type=dict) - - -def test_get_snapshot_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = bigtable_table_admin.GetSnapshotRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_snapshot), - '__call__') as call: - call.return_value = table.Snapshot() - client.get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_snapshot_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetSnapshotRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_snapshot), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) - await client.get_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_snapshot_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Snapshot() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_snapshot( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_snapshot_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Snapshot() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Snapshot()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_snapshot( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.ListSnapshotsRequest, - dict, -]) -def test_list_snapshots(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_snapshots_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__') as call: - client.list_snapshots() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() - -@pytest.mark.asyncio -async def test_list_snapshots_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.ListSnapshotsRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListSnapshotsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListSnapshotsRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListSnapshotsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_snapshots_async_from_dict(): - await test_list_snapshots_async(request_type=dict) - - -def test_list_snapshots_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListSnapshotsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__') as call: - call.return_value = bigtable_table_admin.ListSnapshotsResponse() - client.list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_snapshots_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListSnapshotsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListSnapshotsResponse()) - await client.list_snapshots(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_snapshots_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListSnapshotsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_snapshots( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_snapshots_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_snapshots_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = bigtable_table_admin.ListSnapshotsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListSnapshotsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_snapshots( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_snapshots_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent='parent_value', - ) - - -def test_list_snapshots_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token='def', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_snapshots(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Snapshot) - for i in results) -def test_list_snapshots_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__') as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token='def', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) - pages = list(client.list_snapshots(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_snapshots_async_pager(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token='def', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_snapshots(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table.Snapshot) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_snapshots_async_pages(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_snapshots), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token='def', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_snapshots(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.DeleteSnapshotRequest, - dict, -]) -def test_delete_snapshot(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_snapshot_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_snapshot), - '__call__') as call: - client.delete_snapshot() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() - -@pytest.mark.asyncio -async def test_delete_snapshot_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.DeleteSnapshotRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteSnapshotRequest() - - # Establish that the response is the type that we expect. 
- assert response is None - - -@pytest.mark.asyncio -async def test_delete_snapshot_async_from_dict(): - await test_delete_snapshot_async(request_type=dict) - - -def test_delete_snapshot_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSnapshotRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_snapshot), - '__call__') as call: - call.return_value = None - client.delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_snapshot_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteSnapshotRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_snapshot), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_snapshot(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_snapshot_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.delete_snapshot( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_snapshot_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_snapshot_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_snapshot), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- response = await client.delete_snapshot( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_snapshot_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.delete_snapshot( - bigtable_table_admin.DeleteSnapshotRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.CreateBackupRequest, - dict, -]) -def test_create_backup(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_create_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. 
request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup), - '__call__') as call: - client.create_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() - -@pytest.mark.asyncio -async def test_create_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CreateBackupRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CreateBackupRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_create_backup_async_from_dict(): - await test_create_backup_async(request_type=dict) - - -def test_create_backup_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.CreateBackupRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_create_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CreateBackupRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.create_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_create_backup_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.create_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.create_backup( - parent='parent_value', - backup_id='backup_id_value', - backup=table.Backup(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].backup_id - mock_val = 'backup_id_value' - assert arg == mock_val - arg = args[0].backup - mock_val = table.Backup(name='name_value') - assert arg == mock_val - - -def test_create_backup_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent='parent_value', - backup_id='backup_id_value', - backup=table.Backup(name='name_value'), - ) - -@pytest.mark.asyncio -async def test_create_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.create_backup), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.create_backup( - parent='parent_value', - backup_id='backup_id_value', - backup=table.Backup(name='name_value'), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].backup_id - mock_val = 'backup_id_value' - assert arg == mock_val - arg = args[0].backup - mock_val = table.Backup(name='name_value') - assert arg == mock_val - -@pytest.mark.asyncio -async def test_create_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent='parent_value', - backup_id='backup_id_value', - backup=table.Backup(name='name_value'), - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.GetBackupRequest, - dict, -]) -def test_get_backup(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup( - name='name_value', - source_table='source_table_value', - source_backup='source_backup_value', - size_bytes=1089, - state=table.Backup.State.CREATING, - ) - response = client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == 'name_value' - assert response.source_table == 'source_table_value' - assert response.source_backup == 'source_backup_value' - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - - -def test_get_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_backup), - '__call__') as call: - client.get_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() - -@pytest.mark.asyncio -async def test_get_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.GetBackupRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(table.Backup( - name='name_value', - source_table='source_table_value', - source_backup='source_backup_value', - size_bytes=1089, - state=table.Backup.State.CREATING, - )) - response = await client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.GetBackupRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == 'name_value' - assert response.source_table == 'source_table_value' - assert response.source_backup == 'source_backup_value' - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - - -@pytest.mark.asyncio -async def test_get_backup_async_from_dict(): - await test_get_backup_async(request_type=dict) - - -def test_get_backup_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetBackupRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_backup), - '__call__') as call: - call.return_value = table.Backup() - client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.GetBackupRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_backup), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.get_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_get_backup_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_backup( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_get_backup_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_backup( - bigtable_table_admin.GetBackupRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_get_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_backup( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.get_backup( - bigtable_table_admin.GetBackupRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.UpdateBackupRequest, - dict, -]) -def test_update_backup(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup( - name='name_value', - source_table='source_table_value', - source_backup='source_backup_value', - size_bytes=1089, - state=table.Backup.State.CREATING, - ) - response = client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == 'name_value' - assert response.source_table == 'source_table_value' - assert response.source_backup == 'source_backup_value' - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - - -def test_update_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_backup), - '__call__') as call: - client.update_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() - -@pytest.mark.asyncio -async def test_update_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.UpdateBackupRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(table.Backup( - name='name_value', - source_table='source_table_value', - source_backup='source_backup_value', - size_bytes=1089, - state=table.Backup.State.CREATING, - )) - response = await client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.UpdateBackupRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == 'name_value' - assert response.source_table == 'source_table_value' - assert response.source_backup == 'source_backup_value' - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - - -@pytest.mark.asyncio -async def test_update_backup_async_from_dict(): - await test_update_backup_async(request_type=dict) - - -def test_update_backup_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateBackupRequest() - - request.backup.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_backup), - '__call__') as call: - call.return_value = table.Backup() - client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'backup.name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_update_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.UpdateBackupRequest() - - request.backup.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.update_backup), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - await client.update_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'backup.name=name_value', - ) in kw['metadata'] - - -def test_update_backup_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.update_backup( - backup=table.Backup(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].backup - mock_val = table.Backup(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - - -def test_update_backup_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - -@pytest.mark.asyncio -async def test_update_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.update_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = table.Backup() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(table.Backup()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.update_backup( - backup=table.Backup(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].backup - mock_val = table.Backup(name='name_value') - assert arg == mock_val - arg = args[0].update_mask - mock_val = field_mask_pb2.FieldMask(paths=['paths_value']) - assert arg == mock_val - -@pytest.mark.asyncio -async def test_update_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.DeleteBackupRequest, - dict, -]) -def test_delete_backup(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - response = client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_backup), - '__call__') as call: - client.delete_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() - -@pytest.mark.asyncio -async def test_delete_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.DeleteBackupRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - response = await client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.DeleteBackupRequest() - - # Establish that the response is the type that we expect. - assert response is None - - -@pytest.mark.asyncio -async def test_delete_backup_async_from_dict(): - await test_delete_backup_async(request_type=dict) - - -def test_delete_backup_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteBackupRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.delete_backup), - '__call__') as call: - call.return_value = None - client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_delete_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.DeleteBackupRequest() - - request.name = 'name_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_backup), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - await client.delete_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'name=name_value', - ) in kw['metadata'] - - -def test_delete_backup_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. 
- client.delete_backup( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - - -def test_delete_backup_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), - name='name_value', - ) - -@pytest.mark.asyncio -async def test_delete_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.delete_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = None - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.delete_backup( - name='name_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].name - mock_val = 'name_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_delete_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - await client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), - name='name_value', - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.ListBackupsRequest, - dict, -]) -def test_list_backups(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse( - next_page_token='next_page_token_value', - ) - response = client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_backups_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_backups), - '__call__') as call: - client.list_backups() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() - -@pytest.mark.asyncio -async def test_list_backups_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.ListBackupsRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListBackupsResponse( - next_page_token='next_page_token_value', - )) - response = await client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.ListBackupsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsAsyncPager) - assert response.next_page_token == 'next_page_token_value' - - -@pytest.mark.asyncio -async def test_list_backups_async_from_dict(): - await test_list_backups_async(request_type=dict) - - -def test_list_backups_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. 
- request = bigtable_table_admin.ListBackupsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), - '__call__') as call: - call.return_value = bigtable_table_admin.ListBackupsResponse() - client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_list_backups_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.ListBackupsRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListBackupsResponse()) - await client.list_backups(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_list_backups_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.list_backups), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.list_backups( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - - -def test_list_backups_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_backups( - bigtable_table_admin.ListBackupsRequest(), - parent='parent_value', - ) - -@pytest.mark.asyncio -async def test_list_backups_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = bigtable_table_admin.ListBackupsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(bigtable_table_admin.ListBackupsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.list_backups( - parent='parent_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_list_backups_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.list_backups( - bigtable_table_admin.ListBackupsRequest(), - parent='parent_value', - ) - - -def test_list_backups_pager(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token='def', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - RuntimeError, - ) - - metadata = () - metadata = tuple(metadata) + ( - gapic_v1.routing_header.to_grpc_metadata(( - ('parent', ''), - )), - ) - pager = client.list_backups(request={}) - - assert pager._metadata == metadata - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Backup) - for i in results) -def test_list_backups_pages(transport_name: str = "grpc"): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport_name, - ) - - # Mock the actual call within the gRPC stub, and fake 
the request. - with mock.patch.object( - type(client.transport.list_backups), - '__call__') as call: - # Set the response to a series of pages. - call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token='def', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - RuntimeError, - ) - pages = list(client.list_backups(request={}).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.asyncio -async def test_list_backups_async_pager(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token='def', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - RuntimeError, - ) - async_pager = await client.list_backups(request={},) - assert async_pager.next_page_token == 'abc' - responses = [] - async for response in async_pager: # pragma: no branch - responses.append(response) - - assert len(responses) == 6 - assert all(isinstance(i, table.Backup) - for i in responses) - - -@pytest.mark.asyncio -async def test_list_backups_async_pages(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.list_backups), - '__call__', new_callable=mock.AsyncMock) as call: - # Set the response to a series of pages. 
- call.side_effect = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token='def', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - RuntimeError, - ) - pages = [] - # Workaround issue in python 3.9 related to code coverage by adding `# pragma: no branch` - # See https://github.com/googleapis/gapic-generator-python/pull/1174#issuecomment-1025132372 - async for page_ in ( # pragma: no branch - await client.list_backups(request={}) - ).pages: - pages.append(page_) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.RestoreTableRequest, - dict, -]) -def test_restore_table(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.restore_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -def test_restore_table_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.restore_table), - '__call__') as call: - client.restore_table() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() - -@pytest.mark.asyncio -async def test_restore_table_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.RestoreTableRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.restore_table), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.RestoreTableRequest() - - # Establish that the response is the type that we expect. 
- assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_restore_table_async_from_dict(): - await test_restore_table_async(request_type=dict) - - -def test_restore_table_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.RestoreTableRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.restore_table), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.restore_table(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_restore_table_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.RestoreTableRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.restore_table), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.restore_table(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.CopyBackupRequest, - dict, -]) -def test_copy_backup(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.copy_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = operations_pb2.Operation(name='operations/spam') - response = client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -def test_copy_backup_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.copy_backup), - '__call__') as call: - client.copy_backup() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() - -@pytest.mark.asyncio -async def test_copy_backup_async(transport: str = 'grpc_asyncio', request_type=bigtable_table_admin.CopyBackupRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.copy_backup), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - response = await client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == bigtable_table_admin.CopyBackupRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, future.Future) - - -@pytest.mark.asyncio -async def test_copy_backup_async_from_dict(): - await test_copy_backup_async(request_type=dict) - - -def test_copy_backup_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CopyBackupRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.copy_backup), - '__call__') as call: - call.return_value = operations_pb2.Operation(name='operations/op') - client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_copy_backup_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = bigtable_table_admin.CopyBackupRequest() - - request.parent = 'parent_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.copy_backup), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(operations_pb2.Operation(name='operations/op')) - await client.copy_backup(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'parent=parent_value', - ) in kw['metadata'] - - -def test_copy_backup_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.copy_backup), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.copy_backup( - parent='parent_value', - backup_id='backup_id_value', - source_backup='source_backup_value', - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].backup_id - mock_val = 'backup_id_value' - assert arg == mock_val - arg = args[0].source_backup - mock_val = 'source_backup_value' - assert arg == mock_val - assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp(seconds=751) - - -def test_copy_backup_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent='parent_value', - backup_id='backup_id_value', - source_backup='source_backup_value', - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - -@pytest.mark.asyncio -async def test_copy_backup_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.copy_backup), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = operations_pb2.Operation(name='operations/op') - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall( - operations_pb2.Operation(name='operations/spam') - ) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.copy_backup( - parent='parent_value', - backup_id='backup_id_value', - source_backup='source_backup_value', - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].parent - mock_val = 'parent_value' - assert arg == mock_val - arg = args[0].backup_id - mock_val = 'backup_id_value' - assert arg == mock_val - arg = args[0].source_backup - mock_val = 'source_backup_value' - assert arg == mock_val - assert TimestampRule().to_proto(args[0].expire_time) == timestamp_pb2.Timestamp(seconds=751) - -@pytest.mark.asyncio -async def test_copy_backup_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent='parent_value', - backup_id='backup_id_value', - source_backup='source_backup_value', - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.GetIamPolicyRequest, - dict, -]) -def test_get_iam_policy(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. 
- request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b'etag_blob', - ) - response = client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -def test_get_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - client.get_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - -@pytest.mark.asyncio -async def test_get_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.GetIamPolicyRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy( - version=774, - etag=b'etag_blob', - )) - response = await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.GetIamPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -@pytest.mark.asyncio -async def test_get_iam_policy_async_from_dict(): - await test_get_iam_policy_async(request_type=dict) - - -def test_get_iam_policy_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - call.return_value = policy_pb2.Policy() - client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. 
- _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_get_iam_policy_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.GetIamPolicyRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.get_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - -def test_get_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.get_iam_policy(request={ - 'resource': 'resource_value', - 'options': options_pb2.GetPolicyOptions(requested_policy_version=2598), - } - ) - call.assert_called() - - -def test_get_iam_policy_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.get_iam_policy( - resource='resource_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - - -def test_get_iam_policy_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource='resource_value', - ) - -@pytest.mark.asyncio -async def test_get_iam_policy_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.get_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.get_iam_policy( - resource='resource_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_get_iam_policy_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource='resource_value', - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.SetIamPolicyRequest, - dict, -]) -def test_set_iam_policy(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy( - version=774, - etag=b'etag_blob', - ) - response = client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -def test_set_iam_policy_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. 
- client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - client.set_iam_policy() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - -@pytest.mark.asyncio -async def test_set_iam_policy_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.SetIamPolicyRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy( - version=774, - etag=b'etag_blob', - )) - response = await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.SetIamPolicyRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -@pytest.mark.asyncio -async def test_set_iam_policy_async_from_dict(): - await test_set_iam_policy_async(request_type=dict) - - -def test_set_iam_policy_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. 
Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - call.return_value = policy_pb2.Policy() - client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_set_iam_policy_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.SetIamPolicyRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - await client.set_iam_policy(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - -def test_set_iam_policy_from_dict_foreign(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - response = client.set_iam_policy(request={ - 'resource': 'resource_value', - 'policy': policy_pb2.Policy(version=774), - 'update_mask': field_mask_pb2.FieldMask(paths=['paths_value']), - } - ) - call.assert_called() - - -def test_set_iam_policy_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = policy_pb2.Policy() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.set_iam_policy( - resource='resource_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - - -def test_set_iam_policy_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource='resource_value', - ) - -@pytest.mark.asyncio -async def test_set_iam_policy_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.set_iam_policy), - '__call__') as call: - # Designate an appropriate return value for the call. 
- call.return_value = policy_pb2.Policy() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.set_iam_policy( - resource='resource_value', - ) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - -@pytest.mark.asyncio -async def test_set_iam_policy_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource='resource_value', - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, -]) -def test_test_iam_permissions(request_type, transport: str = 'grpc'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=['permissions_value'], - ) - response = client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ['permissions_value'] - - -def test_test_iam_permissions_empty_call(): - # This test is a coverage failsafe to make sure that totally empty calls, - # i.e. request == None and no flattened fields passed, work. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - client.test_iam_permissions() - call.assert_called() - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - -@pytest.mark.asyncio -async def test_test_iam_permissions_async(transport: str = 'grpc_asyncio', request_type=iam_policy_pb2.TestIamPermissionsRequest): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Everything is optional in proto3 as far as the runtime is concerned, - # and we are mocking out the actual API, so just send an empty request. - request = request_type() - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value =grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse( - permissions=['permissions_value'], - )) - response = await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == iam_policy_pb2.TestIamPermissionsRequest() - - # Establish that the response is the type that we expect. - assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ['permissions_value'] - - -@pytest.mark.asyncio -async def test_test_iam_permissions_async_from_dict(): - await test_test_iam_permissions_async(request_type=dict) - - -def test_test_iam_permissions_field_headers(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - - -@pytest.mark.asyncio -async def test_test_iam_permissions_field_headers_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Any value that is part of the HTTP/1.1 URI should be sent as - # a field header. Set these to a non-empty value. - request = iam_policy_pb2.TestIamPermissionsRequest() - - request.resource = 'resource_value' - - # Mock the actual call within the gRPC stub, and fake the request. 
- with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse()) - await client.test_iam_permissions(request) - - # Establish that the underlying gRPC stub method was called. - assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - assert args[0] == request - - # Establish that the field header was sent. - _, _, kw = call.mock_calls[0] - assert ( - 'x-goog-request-params', - 'resource=resource_value', - ) in kw['metadata'] - -def test_test_iam_permissions_from_dict_foreign(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - response = client.test_iam_permissions(request={ - 'resource': 'resource_value', - 'permissions': ['permissions_value'], - } - ) - call.assert_called() - - -def test_test_iam_permissions_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - client.test_iam_permissions( - resource='resource_value', - permissions=['permissions_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) == 1 - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - arg = args[0].permissions - mock_val = ['permissions_value'] - assert arg == mock_val - - -def test_test_iam_permissions_flattened_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource='resource_value', - permissions=['permissions_value'], - ) - -@pytest.mark.asyncio -async def test_test_iam_permissions_flattened_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Mock the actual call within the gRPC stub, and fake the request. - with mock.patch.object( - type(client.transport.test_iam_permissions), - '__call__') as call: - # Designate an appropriate return value for the call. - call.return_value = iam_policy_pb2.TestIamPermissionsResponse() - - call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(iam_policy_pb2.TestIamPermissionsResponse()) - # Call the method with a truthy value for each flattened field, - # using the keyword arguments to the method. - response = await client.test_iam_permissions( - resource='resource_value', - permissions=['permissions_value'], - ) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(call.mock_calls) - _, args, _ = call.mock_calls[0] - arg = args[0].resource - mock_val = 'resource_value' - assert arg == mock_val - arg = args[0].permissions - mock_val = ['permissions_value'] - assert arg == mock_val - -@pytest.mark.asyncio -async def test_test_iam_permissions_flattened_error_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - await client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource='resource_value', - permissions=['permissions_value'], - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.CreateTableRequest, - dict, -]) -def test_create_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gba_table.Table( - name='name_value', - granularity=gba_table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_table(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, gba_table.Table) - assert response.name == 'name_value' - assert response.granularity == gba_table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test_create_table_rest_required_fields(request_type=bigtable_table_admin.CreateTableRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - jsonified_request["tableId"] = 'table_id_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == 'table_id_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = gba_table.Table() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_table(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.create_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("parent", "tableId", "table", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, 
"transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_create_table") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_create_table") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CreateTableRequest.pb(bigtable_table_admin.CreateTableRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = gba_table.Table.to_json(gba_table.Table()) - - request = bigtable_table_admin.CreateTableRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = gba_table.Table() - - client.create_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CreateTableRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_table(request) - - -def test_create_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = gba_table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - table_id='table_id_value', - table=gba_table.Table(name='name_value'), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = gba_table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, args[1]) - - -def test_create_table_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_table( - bigtable_table_admin.CreateTableRequest(), - parent='parent_value', - table_id='table_id_value', - table=gba_table.Table(name='name_value'), - ) - - -def test_create_table_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.CreateTableFromSnapshotRequest, - dict, -]) -def test_create_table_from_snapshot_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_table_from_snapshot(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_create_table_from_snapshot_rest_required_fields(request_type=bigtable_table_admin.CreateTableFromSnapshotRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request_init["source_snapshot"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_table_from_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - jsonified_request["tableId"] = 'table_id_value' - jsonified_request["sourceSnapshot"] = 'source_snapshot_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_table_from_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == 'table_id_value' - assert "sourceSnapshot" in jsonified_request - assert jsonified_request["sourceSnapshot"] == 'source_snapshot_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_table_from_snapshot(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_table_from_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("parent", "tableId", "sourceSnapshot", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_table_from_snapshot_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with 
mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_create_table_from_snapshot") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_create_table_from_snapshot") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CreateTableFromSnapshotRequest.pb(bigtable_table_admin.CreateTableFromSnapshotRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_table_admin.CreateTableFromSnapshotRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_table_from_snapshot(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_table_from_snapshot_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CreateTableFromSnapshotRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_table_from_snapshot(request) - - -def test_create_table_from_snapshot_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - table_id='table_id_value', - source_snapshot='source_snapshot_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_table_from_snapshot(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/tables:createFromSnapshot" % client.transport._host, args[1]) - - -def test_create_table_from_snapshot_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.create_table_from_snapshot( - bigtable_table_admin.CreateTableFromSnapshotRequest(), - parent='parent_value', - table_id='table_id_value', - source_snapshot='source_snapshot_value', - ) - - -def test_create_table_from_snapshot_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.ListTablesRequest, - dict, -]) -def test_list_tables_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.ListTablesResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_tables(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListTablesPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_tables_rest_required_fields(request_type=bigtable_table_admin.ListTablesRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_tables._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_tables._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("page_size", "page_token", "view", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListTablesResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_tables(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_tables_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.list_tables._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageSize", "pageToken", "view", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_tables_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_list_tables") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_list_tables") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ListTablesRequest.pb(bigtable_table_admin.ListTablesRequest()) - transcode.return_value = { - "method": "post", - 
"uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.ListTablesResponse.to_json(bigtable_table_admin.ListTablesResponse()) - - request = bigtable_table_admin.ListTablesRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListTablesResponse() - - client.list_tables(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_tables_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.ListTablesRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_tables(request) - - -def test_list_tables_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.ListTablesResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListTablesResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_tables(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*}/tables" % client.transport._host, args[1]) - - -def test_list_tables_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_tables( - bigtable_table_admin.ListTablesRequest(), - parent='parent_value', - ) - - -def test_list_tables_rest_pager(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - table.Table(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListTablesResponse( - tables=[], - next_page_token='def', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListTablesResponse( - tables=[ - table.Table(), - table.Table(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(bigtable_table_admin.ListTablesResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/instances/sample2'} - - pager = client.list_tables(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Table) - for i in results) - - pages = list(client.list_tables(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.GetTableRequest, - dict, -]) -def test_get_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = table.Table( - name='name_value', - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_table(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Table) - assert response.name == 'name_value' - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test_get_table_rest_required_fields(request_type=bigtable_table_admin.GetTableRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_table._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("view", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Table() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_table(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.get_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(("view", )) & set(("name", ))) - - 
-@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_get_table") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_get_table") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.GetTableRequest.pb(bigtable_table_admin.GetTableRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Table.to_json(table.Table()) - - request = bigtable_table_admin.GetTableRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Table() - - client.get_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.GetTableRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_table(request) - - -def test_get_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, args[1]) - - -def test_get_table_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_table( - bigtable_table_admin.GetTableRequest(), - name='name_value', - ) - - -def test_get_table_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.UpdateTableRequest, - dict, -]) -def test_update_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'table': {'name': 'projects/sample1/instances/sample2/tables/sample3'}} - request_init["table"] = {'name': 'projects/sample1/instances/sample2/tables/sample3', 'cluster_states': {}, 'column_families': {}, 'granularity': 1, 'restore_info': {'source_type': 1, 'backup_info': {'backup': 'backup_value', 'start_time': {'seconds': 751, 'nanos': 543}, 'end_time': {}, 'source_table': 'source_table_value', 'source_backup': 'source_backup_value'}}, 'change_stream_config': {'retention_period': {'seconds': 751, 'nanos': 543}}, 'deletion_protection': True} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateTableRequest.meta.fields["table"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["table"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["table"][field])): - del request_init["table"][field][i][subfield] - else: - del 
request_init["table"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_table(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_update_table_rest_required_fields(request_type=bigtable_table_admin.UpdateTableRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).update_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).update_table._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.update_table(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_update_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.update_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask", )) & set(("table", "updateMask", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def 
test_update_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_update_table") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_update_table") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.UpdateTableRequest.pb(bigtable_table_admin.UpdateTableRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_table_admin.UpdateTableRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.update_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.UpdateTableRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'table': {'name': 'projects/sample1/instances/sample2/tables/sample3'}} - request = request_type(**request_init) - - # Mock 
the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_table(request) - - -def test_update_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'table': {'name': 'projects/sample1/instances/sample2/tables/sample3'}} - - # get truthy value for each flattened field - mock_args = dict( - table=gba_table.Table(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.update_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{table.name=projects/*/instances/*/tables/*}" % client.transport._host, args[1]) - - -def test_update_table_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.update_table( - bigtable_table_admin.UpdateTableRequest(), - table=gba_table.Table(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_update_table_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.DeleteTableRequest, - dict, -]) -def test_delete_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_table(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_table_rest_required_fields(request_type=bigtable_table_admin.DeleteTableRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_table(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.delete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_delete_table") as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteTableRequest.pb(bigtable_table_admin.DeleteTableRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_table_admin.DeleteTableRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] 
- pre.return_value = request, metadata - - client.delete_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - - -def test_delete_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.DeleteTableRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_table(request) - - -def test_delete_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.delete_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}" % client.transport._host, args[1]) - - -def test_delete_table_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_table( - bigtable_table_admin.DeleteTableRequest(), - name='name_value', - ) - - -def test_delete_table_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.UndeleteTableRequest, - dict, -]) -def test_undelete_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.undelete_table(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_undelete_table_rest_required_fields(request_type=bigtable_table_admin.UndeleteTableRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).undelete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).undelete_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.undelete_table(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_undelete_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.undelete_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_undelete_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_undelete_table") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_undelete_table") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.UndeleteTableRequest.pb(bigtable_table_admin.UndeleteTableRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - 
"body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_table_admin.UndeleteTableRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.undelete_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_undelete_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.UndeleteTableRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.undelete_table(request) - - -def test_undelete_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.undelete_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:undelete" % client.transport._host, args[1]) - - -def test_undelete_table_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.undelete_table( - bigtable_table_admin.UndeleteTableRequest(), - name='name_value', - ) - - -def test_undelete_table_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.ModifyColumnFamiliesRequest, - dict, -]) -def test_modify_column_families_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = table.Table( - name='name_value', - granularity=table.Table.TimestampGranularity.MILLIS, - deletion_protection=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.modify_column_families(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Table) - assert response.name == 'name_value' - assert response.granularity == table.Table.TimestampGranularity.MILLIS - assert response.deletion_protection is True - - -def test_modify_column_families_rest_required_fields(request_type=bigtable_table_admin.ModifyColumnFamiliesRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).modify_column_families._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).modify_column_families._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Table() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.modify_column_families(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_modify_column_families_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.modify_column_families._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "modifications", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_modify_column_families_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_modify_column_families") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, 
"pre_modify_column_families") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ModifyColumnFamiliesRequest.pb(bigtable_table_admin.ModifyColumnFamiliesRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Table.to_json(table.Table()) - - request = bigtable_table_admin.ModifyColumnFamiliesRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Table() - - client.modify_column_families(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_modify_column_families_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.ModifyColumnFamiliesRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.modify_column_families(request) - - -def test_modify_column_families_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = table.Table() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')], - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Table.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.modify_column_families(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:modifyColumnFamilies" % client.transport._host, args[1]) - - -def test_modify_column_families_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.modify_column_families( - bigtable_table_admin.ModifyColumnFamiliesRequest(), - name='name_value', - modifications=[bigtable_table_admin.ModifyColumnFamiliesRequest.Modification(id='id_value')], - ) - - -def test_modify_column_families_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.DropRowRangeRequest, - dict, -]) -def test_drop_row_range_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.drop_row_range(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_drop_row_range_rest_required_fields(request_type=bigtable_table_admin.DropRowRangeRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).drop_row_range._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).drop_row_range._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.drop_row_range(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_drop_row_range_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.drop_row_range._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_drop_row_range_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_drop_row_range") as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DropRowRangeRequest.pb(bigtable_table_admin.DropRowRangeRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_table_admin.DropRowRangeRequest() - metadata 
=[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.drop_row_range(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - - -def test_drop_row_range_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.DropRowRangeRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.drop_row_range(request) - - -def test_drop_row_range_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.GenerateConsistencyTokenRequest, - dict, -]) -def test_generate_consistency_token_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.GenerateConsistencyTokenResponse( - consistency_token='consistency_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.generate_consistency_token(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.GenerateConsistencyTokenResponse) - assert response.consistency_token == 'consistency_token_value' - - -def test_generate_consistency_token_rest_required_fields(request_type=bigtable_table_admin.GenerateConsistencyTokenRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).generate_consistency_token._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).generate_consistency_token._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - 
client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.generate_consistency_token(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_generate_consistency_token_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def 
test_generate_consistency_token_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_generate_consistency_token") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_generate_consistency_token") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.GenerateConsistencyTokenRequest.pb(bigtable_table_admin.GenerateConsistencyTokenRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.GenerateConsistencyTokenResponse.to_json(bigtable_table_admin.GenerateConsistencyTokenResponse()) - - request = bigtable_table_admin.GenerateConsistencyTokenRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - - client.generate_consistency_token(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_generate_consistency_token_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.GenerateConsistencyTokenRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding 
- request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.generate_consistency_token(request) - - -def test_generate_consistency_token_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.GenerateConsistencyTokenResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.generate_consistency_token(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:generateConsistencyToken" % client.transport._host, args[1]) - - -def test_generate_consistency_token_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.generate_consistency_token( - bigtable_table_admin.GenerateConsistencyTokenRequest(), - name='name_value', - ) - - -def test_generate_consistency_token_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.CheckConsistencyRequest, - dict, -]) -def test_check_consistency_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.CheckConsistencyResponse( - consistent=True, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.check_consistency(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, bigtable_table_admin.CheckConsistencyResponse) - assert response.consistent is True - - -def test_check_consistency_rest_required_fields(request_type=bigtable_table_admin.CheckConsistencyRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request_init["consistency_token"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).check_consistency._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - jsonified_request["consistencyToken"] = 'consistency_token_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).check_consistency._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - assert 
"consistencyToken" in jsonified_request - assert jsonified_request["consistencyToken"] == 'consistency_token_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.check_consistency(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_check_consistency_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.check_consistency._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "consistencyToken", 
))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_check_consistency_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_check_consistency") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_check_consistency") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CheckConsistencyRequest.pb(bigtable_table_admin.CheckConsistencyRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.CheckConsistencyResponse.to_json(bigtable_table_admin.CheckConsistencyResponse()) - - request = bigtable_table_admin.CheckConsistencyRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.CheckConsistencyResponse() - - client.check_consistency(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_check_consistency_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CheckConsistencyRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 
'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.check_consistency(request) - - -def test_check_consistency_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.CheckConsistencyResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - consistency_token='consistency_token_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.CheckConsistencyResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.check_consistency(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:checkConsistency" % client.transport._host, args[1]) - - -def test_check_consistency_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.check_consistency( - bigtable_table_admin.CheckConsistencyRequest(), - name='name_value', - consistency_token='consistency_token_value', - ) - - -def test_check_consistency_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.SnapshotTableRequest, - dict, -]) -def test_snapshot_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.snapshot_table(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_snapshot_table_rest_required_fields(request_type=bigtable_table_admin.SnapshotTableRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request_init["cluster"] = "" - request_init["snapshot_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).snapshot_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - jsonified_request["cluster"] = 'cluster_value' - jsonified_request["snapshotId"] = 'snapshot_id_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).snapshot_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - assert "cluster" in jsonified_request - assert jsonified_request["cluster"] == 'cluster_value' - assert "snapshotId" in jsonified_request - assert jsonified_request["snapshotId"] == 'snapshot_id_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.snapshot_table(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_snapshot_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.snapshot_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", "cluster", "snapshotId", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_snapshot_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, 
"_set_result_from_operation"), \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_snapshot_table") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_snapshot_table") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.SnapshotTableRequest.pb(bigtable_table_admin.SnapshotTableRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_table_admin.SnapshotTableRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.snapshot_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_snapshot_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.SnapshotTableRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.snapshot_table(request) - - -def test_snapshot_table_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - cluster='cluster_value', - snapshot_id='snapshot_id_value', - description='description_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.snapshot_table(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/tables/*}:snapshot" % client.transport._host, args[1]) - - -def test_snapshot_table_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.snapshot_table( - bigtable_table_admin.SnapshotTableRequest(), - name='name_value', - cluster='cluster_value', - snapshot_id='snapshot_id_value', - description='description_value', - ) - - -def test_snapshot_table_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.GetSnapshotRequest, - dict, -]) -def test_get_snapshot_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = table.Snapshot( - name='name_value', - data_size_bytes=1594, - state=table.Snapshot.State.READY, - description='description_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_snapshot(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Snapshot) - assert response.name == 'name_value' - assert response.data_size_bytes == 1594 - assert response.state == table.Snapshot.State.READY - assert response.description == 'description_value' - - -def test_get_snapshot_rest_required_fields(request_type=bigtable_table_admin.GetSnapshotRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 
'name_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Snapshot() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_snapshot(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.get_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_snapshot_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - 
credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_get_snapshot") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_get_snapshot") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.GetSnapshotRequest.pb(bigtable_table_admin.GetSnapshotRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Snapshot.to_json(table.Snapshot()) - - request = bigtable_table_admin.GetSnapshotRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Snapshot() - - client.get_snapshot(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_snapshot_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.GetSnapshotRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_snapshot(request) - - -def test_get_snapshot_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = table.Snapshot() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Snapshot.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_snapshot(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" % client.transport._host, args[1]) - - -def test_get_snapshot_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_snapshot( - bigtable_table_admin.GetSnapshotRequest(), - name='name_value', - ) - - -def test_get_snapshot_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.ListSnapshotsRequest, - dict, -]) -def test_list_snapshots_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_snapshots(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, pagers.ListSnapshotsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_snapshots_rest_required_fields(request_type=bigtable_table_admin.ListSnapshotsRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_snapshots._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_snapshots._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_snapshots(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_snapshots_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.list_snapshots._get_unset_required_fields({}) - assert set(unset_fields) == (set(("pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_snapshots_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, 
"transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_list_snapshots") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_list_snapshots") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ListSnapshotsRequest.pb(bigtable_table_admin.ListSnapshotsRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.ListSnapshotsResponse.to_json(bigtable_table_admin.ListSnapshotsResponse()) - - request = bigtable_table_admin.ListSnapshotsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListSnapshotsResponse() - - client.list_snapshots(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_snapshots_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.ListSnapshotsRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_snapshots(request) - - -def test_list_snapshots_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListSnapshotsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListSnapshotsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_snapshots(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/snapshots" % client.transport._host, args[1]) - - -def test_list_snapshots_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_snapshots( - bigtable_table_admin.ListSnapshotsRequest(), - parent='parent_value', - ) - - -def test_list_snapshots_rest_pager(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - table.Snapshot(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[], - next_page_token='def', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListSnapshotsResponse( - snapshots=[ - table.Snapshot(), - table.Snapshot(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(bigtable_table_admin.ListSnapshotsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - - pager = client.list_snapshots(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Snapshot) - for i in results) - - pages = list(client.list_snapshots(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.DeleteSnapshotRequest, - dict, -]) -def test_delete_snapshot_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and 
fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_snapshot(request) - - # Establish that the response is the type that we expect. - assert response is None - - -def test_delete_snapshot_rest_required_fields(request_type=bigtable_table_admin.DeleteSnapshotRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_snapshot._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_snapshot(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_snapshot_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.delete_snapshot._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_snapshot_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, 
"pre_delete_snapshot") as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteSnapshotRequest.pb(bigtable_table_admin.DeleteSnapshotRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_table_admin.DeleteSnapshotRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - - client.delete_snapshot(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - - -def test_delete_snapshot_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.DeleteSnapshotRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_snapshot(request) - - -def test_delete_snapshot_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
-        return_value = None
-
-        # get arguments that satisfy an http rule for this method
-        sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3/snapshots/sample4'}
-
-        # get truthy value for each flattened field
-        mock_args = dict(
-            name='name_value',
-        )
-        mock_args.update(sample_request)
-
-        # Wrap the value into a proper Response obj
-        response_value = Response()
-        response_value.status_code = 200
-        json_return_value = ''
-        response_value._content = json_return_value.encode('UTF-8')
-        req.return_value = response_value
-
-        client.delete_snapshot(**mock_args)
-
-        # Establish that the underlying call was made with the expected
-        # request object values.
-        assert len(req.mock_calls) == 1
-        _, args, _ = req.mock_calls[0]
-        assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*/snapshots/*}" % client.transport._host, args[1])
-
-
-def test_delete_snapshot_rest_flattened_error(transport: str = 'rest'):
-    client = BigtableTableAdminClient(
-        credentials=_AnonymousCredentialsWithUniverseDomain(),
-        transport=transport,
-    )
-
-    # Attempting to call a method with both a request object and flattened
-    # fields is an error.
-    with pytest.raises(ValueError):
-        client.delete_snapshot(
-            bigtable_table_admin.DeleteSnapshotRequest(),
-            name='name_value',
-        )
-
-
-def test_delete_snapshot_rest_error():
-    client = BigtableTableAdminClient(
-        credentials=_AnonymousCredentialsWithUniverseDomain(),
-        transport='rest'
-    )
-
-
-@pytest.mark.parametrize("request_type", [
-    bigtable_table_admin.CreateBackupRequest,
-    dict,
-])
-def test_create_backup_rest(request_type):
-    client = BigtableTableAdminClient(
-        credentials=_AnonymousCredentialsWithUniverseDomain(),
-        transport="rest",
-    )
-
-    # send a request that will satisfy transcoding
-    request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'}
-    request_init["backup"] = {'name': 'name_value', 'source_table': 'source_table_value', 'source_backup': 'source_backup_value', 'expire_time': {'seconds': 751, 'nanos': 543}, 'start_time': {}, 'end_time': {}, 'size_bytes': 1089, 'state': 1, 'encryption_info': {'encryption_type': 1, 'encryption_status': {'code': 411, 'message': 'message_value', 'details': [{'type_url': 'type.googleapis.com/google.protobuf.Duration', 'value': b'\x08\x0c\x10\xdb\x07'}]}, 'kms_key_version': 'kms_key_version_value'}}
-    # The version of a generated dependency at test runtime may differ from the version used during generation.
-    # Delete any fields which are not present in the current runtime dependency
-    # See https://github.com/googleapis/gapic-generator-python/issues/1748
-
-    # Determine if the message type is proto-plus or protobuf
-    test_field = bigtable_table_admin.CreateBackupRequest.meta.fields["backup"]
-
-    def get_message_fields(field):
-        # Given a field which is a message (composite type), return a list with
-        # all the fields of the message.
-        # If the field is not a composite type, return an empty list.
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["backup"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["backup"][field])): - del request_init["backup"][field][i][subfield] - else: - del 
request_init["backup"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.create_backup(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_create_backup_rest_required_fields(request_type=bigtable_table_admin.CreateBackupRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - assert "backupId" not in jsonified_request - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == request_init["backup_id"] - - jsonified_request["parent"] = 'parent_value' - jsonified_request["backupId"] = 'backup_id_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).create_backup._get_unset_required_fields(jsonified_request) - # Check 
that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("backup_id", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == 'backup_id_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.create_backup(request) - - expected_params = [ - ( - "backupId", - "", - ), - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_create_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.create_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(("backupId", )) & set(("parent", "backupId", "backup", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_create_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_create_backup") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_create_backup") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CreateBackupRequest.pb(bigtable_table_admin.CreateBackupRequest()) - 
transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_table_admin.CreateBackupRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.create_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_create_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CreateBackupRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.create_backup(request) - - -def test_create_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - backup_id='backup_id_value', - backup=table.Backup(name='name_value'), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.create_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" % client.transport._host, args[1]) - - -def test_create_backup_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.create_backup( - bigtable_table_admin.CreateBackupRequest(), - parent='parent_value', - backup_id='backup_id_value', - backup=table.Backup(name='name_value'), - ) - - -def test_create_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.GetBackupRequest, - dict, -]) -def test_get_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup( - name='name_value', - source_table='source_table_value', - source_backup='source_backup_value', - size_bytes=1089, - state=table.Backup.State.CREATING, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_backup(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, table.Backup) - assert response.name == 'name_value' - assert response.source_table == 'source_table_value' - assert response.source_backup == 'source_backup_value' - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - - -def test_get_backup_rest_required_fields(request_type=bigtable_table_admin.GetBackupRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Backup() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_backup(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.get_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_get_backup") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_get_backup") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = 
bigtable_table_admin.GetBackupRequest.pb(bigtable_table_admin.GetBackupRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Backup.to_json(table.Backup()) - - request = bigtable_table_admin.GetBackupRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Backup() - - client.get_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.GetBackupRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_backup(request) - - -def test_get_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = table.Backup() - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" % client.transport._host, args[1]) - - -def test_get_backup_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_backup( - bigtable_table_admin.GetBackupRequest(), - name='name_value', - ) - - -def test_get_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.UpdateBackupRequest, - dict, -]) -def test_update_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'backup': {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'}} - request_init["backup"] = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4', 'source_table': 'source_table_value', 'source_backup': 'source_backup_value', 'expire_time': {'seconds': 751, 'nanos': 543}, 'start_time': {}, 'end_time': {}, 'size_bytes': 1089, 'state': 1, 'encryption_info': {'encryption_type': 1, 'encryption_status': {'code': 411, 'message': 'message_value', 'details': [{'type_url': 'type.googleapis.com/google.protobuf.Duration', 'value': b'\x08\x0c\x10\xdb\x07'}]}, 'kms_key_version': 'kms_key_version_value'}} - # The version of a generated dependency at test runtime may differ from the version used during generation. - # Delete any fields which are not present in the current runtime dependency - # See https://github.com/googleapis/gapic-generator-python/issues/1748 - - # Determine if the message type is proto-plus or protobuf - test_field = bigtable_table_admin.UpdateBackupRequest.meta.fields["backup"] - - def get_message_fields(field): - # Given a field which is a message (composite type), return a list with - # all the fields of the message. - # If the field is not a composite type, return an empty list. 
- message_fields = [] - - if hasattr(field, "message") and field.message: - is_field_type_proto_plus_type = not hasattr(field.message, "DESCRIPTOR") - - if is_field_type_proto_plus_type: - message_fields = field.message.meta.fields.values() - # Add `# pragma: NO COVER` because there may not be any `*_pb2` field types - else: # pragma: NO COVER - message_fields = field.message.DESCRIPTOR.fields - return message_fields - - runtime_nested_fields = [ - (field.name, nested_field.name) - for field in get_message_fields(test_field) - for nested_field in get_message_fields(field) - ] - - subfields_not_in_runtime = [] - - # For each item in the sample request, create a list of sub fields which are not present at runtime - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for field, value in request_init["backup"].items(): # pragma: NO COVER - result = None - is_repeated = False - # For repeated fields - if isinstance(value, list) and len(value): - is_repeated = True - result = value[0] - # For fields where the type is another message - if isinstance(value, dict): - result = value - - if result and hasattr(result, "keys"): - for subfield in result.keys(): - if (field, subfield) not in runtime_nested_fields: - subfields_not_in_runtime.append( - {"field": field, "subfield": subfield, "is_repeated": is_repeated} - ) - - # Remove fields from the sample request which are not present in the runtime version of the dependency - # Add `# pragma: NO COVER` because this test code will not run if all subfields are present at runtime - for subfield_to_delete in subfields_not_in_runtime: # pragma: NO COVER - field = subfield_to_delete.get("field") - field_repeated = subfield_to_delete.get("is_repeated") - subfield = subfield_to_delete.get("subfield") - if subfield: - if field_repeated: - for i in range(0, len(request_init["backup"][field])): - del request_init["backup"][field][i][subfield] - else: - del 
request_init["backup"][field][subfield] - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = table.Backup( - name='name_value', - source_table='source_table_value', - source_backup='source_backup_value', - size_bytes=1089, - state=table.Backup.State.CREATING, - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.update_backup(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, table.Backup) - assert response.name == 'name_value' - assert response.source_table == 'source_table_value' - assert response.source_backup == 'source_backup_value' - assert response.size_bytes == 1089 - assert response.state == table.Backup.State.CREATING - - -def test_update_backup_rest_required_fields(request_type=bigtable_table_admin.UpdateBackupRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).update_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - unset_fields = 
transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).update_backup._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. - assert not set(unset_fields) - set(("update_mask", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = table.Backup() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "patch", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.update_backup(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_update_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.update_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(("updateMask", )) & set(("backup", "updateMask", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_update_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_update_backup") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_update_backup") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.UpdateBackupRequest.pb(bigtable_table_admin.UpdateBackupRequest()) - transcode.return_value = { - 
"method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = table.Backup.to_json(table.Backup()) - - request = bigtable_table_admin.UpdateBackupRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = table.Backup() - - client.update_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_update_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.UpdateBackupRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'backup': {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'}} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.update_backup(request) - - -def test_update_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = table.Backup() - - # get arguments that satisfy an http rule for this method - sample_request = {'backup': {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'}} - - # get truthy value for each flattened field - mock_args = dict( - backup=table.Backup(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = table.Backup.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.update_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{backup.name=projects/*/instances/*/clusters/*/backups/*}" % client.transport._host, args[1]) - - -def test_update_backup_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.update_backup( - bigtable_table_admin.UpdateBackupRequest(), - backup=table.Backup(name='name_value'), - update_mask=field_mask_pb2.FieldMask(paths=['paths_value']), - ) - - -def test_update_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.DeleteBackupRequest, - dict, -]) -def test_delete_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = None - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.delete_backup(request) - - # Establish that the response is the type that we expect. 
- assert response is None - - -def test_delete_backup_rest_required_fields(request_type=bigtable_table_admin.DeleteBackupRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["name"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["name"] = 'name_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).delete_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "name" in jsonified_request - assert jsonified_request["name"] == 'name_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = None - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "delete", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.delete_backup(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_delete_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.delete_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("name", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_delete_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_delete_backup") as pre: - pre.assert_not_called() - pb_message = bigtable_table_admin.DeleteBackupRequest.pb(bigtable_table_admin.DeleteBackupRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - - request = bigtable_table_admin.DeleteBackupRequest() - metadata =[ - ("key", "val"), - ("cephalopod", 
"squid"), - ] - pre.return_value = request, metadata - - client.delete_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - - -def test_delete_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.DeleteBackupRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.delete_backup(request) - - -def test_delete_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = None - - # get arguments that satisfy an http rule for this method - sample_request = {'name': 'projects/sample1/instances/sample2/clusters/sample3/backups/sample4'} - - # get truthy value for each flattened field - mock_args = dict( - name='name_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = '' - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.delete_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{name=projects/*/instances/*/clusters/*/backups/*}" % client.transport._host, args[1]) - - -def test_delete_backup_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.delete_backup( - bigtable_table_admin.DeleteBackupRequest(), - name='name_value', - ) - - -def test_delete_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.ListBackupsRequest, - dict, -]) -def test_list_backups_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse( - next_page_token='next_page_token_value', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.list_backups(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, pagers.ListBackupsPager) - assert response.next_page_token == 'next_page_token_value' - - -def test_list_backups_rest_required_fields(request_type=bigtable_table_admin.ListBackupsRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_backups._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).list_backups._get_unset_required_fields(jsonified_request) - # Check that path parameters and body parameters are not mixing in. 
- assert not set(unset_fields) - set(("filter", "order_by", "page_size", "page_token", )) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = bigtable_table_admin.ListBackupsResponse() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. 
- pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "get", - 'query_params': pb_request, - } - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.list_backups(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_list_backups_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.list_backups._get_unset_required_fields({}) - assert set(unset_fields) == (set(("filter", "orderBy", "pageSize", "pageToken", )) & set(("parent", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_list_backups_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_list_backups") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_list_backups") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.ListBackupsRequest.pb(bigtable_table_admin.ListBackupsRequest()) - transcode.return_value = { - 
"method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = bigtable_table_admin.ListBackupsResponse.to_json(bigtable_table_admin.ListBackupsResponse()) - - request = bigtable_table_admin.ListBackupsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = bigtable_table_admin.ListBackupsResponse() - - client.list_backups(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_list_backups_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.ListBackupsRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.list_backups(request) - - -def test_list_backups_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = bigtable_table_admin.ListBackupsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - # Convert return value to protobuf type - return_value = bigtable_table_admin.ListBackupsResponse.pb(return_value) - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.list_backups(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/backups" % client.transport._host, args[1]) - - -def test_list_backups_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.list_backups( - bigtable_table_admin.ListBackupsRequest(), - parent='parent_value', - ) - - -def test_list_backups_rest_pager(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # TODO(kbandes): remove this mock unless there's a good reason for it. 
- #with mock.patch.object(path_template, 'transcode') as transcode: - # Set the response as a series of pages - response = ( - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - table.Backup(), - ], - next_page_token='abc', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[], - next_page_token='def', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - ], - next_page_token='ghi', - ), - bigtable_table_admin.ListBackupsResponse( - backups=[ - table.Backup(), - table.Backup(), - ], - ), - ) - # Two responses for two calls - response = response + response - - # Wrap the values into proper Response objs - response = tuple(bigtable_table_admin.ListBackupsResponse.to_json(x) for x in response) - return_values = tuple(Response() for i in response) - for return_val, response_val in zip(return_values, response): - return_val._content = response_val.encode('UTF-8') - return_val.status_code = 200 - req.side_effect = return_values - - sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - - pager = client.list_backups(request=sample_request) - - results = list(pager) - assert len(results) == 6 - assert all(isinstance(i, table.Backup) - for i in results) - - pages = list(client.list_backups(request=sample_request).pages) - for page_, token in zip(pages, ['abc','def','ghi', '']): - assert page_.raw_page.next_page_token == token - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.RestoreTableRequest, - dict, -]) -def test_restore_table_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.restore_table(request) - - # Establish that the response is the type that we expect. - assert response.operation.name == "operations/spam" - - -def test_restore_table_rest_required_fields(request_type=bigtable_table_admin.RestoreTableRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["table_id"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).restore_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - jsonified_request["tableId"] = 'table_id_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).restore_table._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - assert "tableId" in jsonified_request - assert jsonified_request["tableId"] == 'table_id_value' - - client = 
BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.restore_table(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_restore_table_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.restore_table._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("parent", "tableId", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_restore_table_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if 
null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, "_set_result_from_operation"), \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_restore_table") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_restore_table") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.RestoreTableRequest.pb(bigtable_table_admin.RestoreTableRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_table_admin.RestoreTableRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.restore_table(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_restore_table_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.RestoreTableRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.restore_table(request) - - -def test_restore_table_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - bigtable_table_admin.CopyBackupRequest, - dict, -]) -def test_copy_backup_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.copy_backup(request) - - # Establish that the response is the type that we expect. 
- assert response.operation.name == "operations/spam" - - -def test_copy_backup_rest_required_fields(request_type=bigtable_table_admin.CopyBackupRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["parent"] = "" - request_init["backup_id"] = "" - request_init["source_backup"] = "" - request = request_type(**request_init) - pb_request = request_type.pb(request) - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).copy_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["parent"] = 'parent_value' - jsonified_request["backupId"] = 'backup_id_value' - jsonified_request["sourceBackup"] = 'source_backup_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).copy_backup._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "parent" in jsonified_request - assert jsonified_request["parent"] == 'parent_value' - assert "backupId" in jsonified_request - assert jsonified_request["backupId"] == 'backup_id_value' - assert "sourceBackup" in jsonified_request - assert jsonified_request["sourceBackup"] == 'source_backup_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request_type.pb(request) - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.copy_backup(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_copy_backup_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.copy_backup._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("parent", "backupId", "sourceBackup", "expireTime", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_copy_backup_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(operation.Operation, 
"_set_result_from_operation"), \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_copy_backup") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_copy_backup") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = bigtable_table_admin.CopyBackupRequest.pb(bigtable_table_admin.CopyBackupRequest()) - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(operations_pb2.Operation()) - - request = bigtable_table_admin.CopyBackupRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = operations_pb2.Operation() - - client.copy_backup(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_copy_backup_rest_bad_request(transport: str = 'rest', request_type=bigtable_table_admin.CopyBackupRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.copy_backup(request) - - -def test_copy_backup_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = operations_pb2.Operation(name='operations/spam') - - # get arguments that satisfy an http rule for this method - sample_request = {'parent': 'projects/sample1/instances/sample2/clusters/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - parent='parent_value', - backup_id='backup_id_value', - source_backup='source_backup_value', - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.copy_backup(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{parent=projects/*/instances/*/clusters/*}/backups:copy" % client.transport._host, args[1]) - - -def test_copy_backup_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.copy_backup( - bigtable_table_admin.CopyBackupRequest(), - parent='parent_value', - backup_id='backup_id_value', - source_backup='source_backup_value', - expire_time=timestamp_pb2.Timestamp(seconds=751), - ) - - -def test_copy_backup_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.GetIamPolicyRequest, - dict, -]) -def test_get_iam_policy_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = policy_pb2.Policy( - version=774, - etag=b'etag_blob', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.get_iam_policy(request) - - # Establish that the response is the type that we expect. - assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -def test_get_iam_policy_rest_required_fields(request_type=iam_policy_pb2.GetIamPolicyRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = 'resource_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).get_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == 'resource_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. 
- return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.get_iam_policy(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_get_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.get_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_get_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as 
transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_get_iam_policy") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_get_iam_policy") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.GetIamPolicyRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) - - request = iam_policy_pb2.GetIamPolicyRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - - client.get_iam_policy(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_get_iam_policy_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.GetIamPolicyRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.get_iam_policy(request) - - -def test_get_iam_policy_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - resource='resource_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.get_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{resource=projects/*/instances/*/tables/*}:getIamPolicy" % client.transport._host, args[1]) - - -def test_get_iam_policy_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.get_iam_policy( - iam_policy_pb2.GetIamPolicyRequest(), - resource='resource_value', - ) - - -def test_get_iam_policy_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.SetIamPolicyRequest, - dict, -]) -def test_set_iam_policy_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy( - version=774, - etag=b'etag_blob', - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.set_iam_policy(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, policy_pb2.Policy) - assert response.version == 774 - assert response.etag == b'etag_blob' - - -def test_set_iam_policy_rest_required_fields(request_type=iam_policy_pb2.SetIamPolicyRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = 'resource_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).set_iam_policy._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == 'resource_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = policy_pb2.Policy() - # Mock the http request call within the method and fake a response. - with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. 
- with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.set_iam_policy(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_set_iam_policy_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.set_iam_policy._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource", "policy", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_set_iam_policy_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_set_iam_policy") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_set_iam_policy") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.SetIamPolicyRequest() - 
transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(policy_pb2.Policy()) - - request = iam_policy_pb2.SetIamPolicyRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = policy_pb2.Policy() - - client.set_iam_policy(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_set_iam_policy_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.SetIamPolicyRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. - with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.set_iam_policy(request) - - -def test_set_iam_policy_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. 
- return_value = policy_pb2.Policy() - - # get arguments that satisfy an http rule for this method - sample_request = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - resource='resource_value', - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.set_iam_policy(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. - assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{resource=projects/*/instances/*/tables/*}:setIamPolicy" % client.transport._host, args[1]) - - -def test_set_iam_policy_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. 
- with pytest.raises(ValueError): - client.set_iam_policy( - iam_policy_pb2.SetIamPolicyRequest(), - resource='resource_value', - ) - - -def test_set_iam_policy_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -@pytest.mark.parametrize("request_type", [ - iam_policy_pb2.TestIamPermissionsRequest, - dict, -]) -def test_test_iam_permissions_rest(request_type): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse( - permissions=['permissions_value'], - ) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - response = client.test_iam_permissions(request) - - # Establish that the response is the type that we expect. 
- assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse) - assert response.permissions == ['permissions_value'] - - -def test_test_iam_permissions_rest_required_fields(request_type=iam_policy_pb2.TestIamPermissionsRequest): - transport_class = transports.BigtableTableAdminRestTransport - - request_init = {} - request_init["resource"] = "" - request_init["permissions"] = "" - request = request_type(**request_init) - pb_request = request - jsonified_request = json.loads(json_format.MessageToJson( - pb_request, - including_default_value_fields=False, - use_integers_for_enums=False - )) - - # verify fields with default values are dropped - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with default values are now present - - jsonified_request["resource"] = 'resource_value' - jsonified_request["permissions"] = 'permissions_value' - - unset_fields = transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()).test_iam_permissions._get_unset_required_fields(jsonified_request) - jsonified_request.update(unset_fields) - - # verify required fields with non-default values are left alone - assert "resource" in jsonified_request - assert jsonified_request["resource"] == 'resource_value' - assert "permissions" in jsonified_request - assert jsonified_request["permissions"] == 'permissions_value' - - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - request = request_type(**request_init) - - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - # Mock the http request call within the method and fake a response. 
- with mock.patch.object(Session, 'request') as req: - # We need to mock transcode() because providing default values - # for required fields will fail the real version if the http_options - # expect actual values for those fields. - with mock.patch.object(path_template, 'transcode') as transcode: - # A uri without fields and an empty body will force all the - # request fields to show up in the query_params. - pb_request = request - transcode_result = { - 'uri': 'v1/sample_method', - 'method': "post", - 'query_params': pb_request, - } - transcode_result['body'] = pb_request - transcode.return_value = transcode_result - - response_value = Response() - response_value.status_code = 200 - - json_return_value = json_format.MessageToJson(return_value) - - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - response = client.test_iam_permissions(request) - - expected_params = [ - ('$alt', 'json;enum-encoding=int') - ] - actual_params = req.call_args.kwargs['params'] - assert expected_params == actual_params - - -def test_test_iam_permissions_rest_unset_required_fields(): - transport = transports.BigtableTableAdminRestTransport(credentials=_AnonymousCredentialsWithUniverseDomain) - - unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) - assert set(unset_fields) == (set(()) & set(("resource", "permissions", ))) - - -@pytest.mark.parametrize("null_interceptor", [True, False]) -def test_test_iam_permissions_rest_interceptors(null_interceptor): - transport = transports.BigtableTableAdminRestTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), - ) - client = BigtableTableAdminClient(transport=transport) - with mock.patch.object(type(client.transport._session), "request") as req, \ - mock.patch.object(path_template, "transcode") as transcode, \ - 
mock.patch.object(transports.BigtableTableAdminRestInterceptor, "post_test_iam_permissions") as post, \ - mock.patch.object(transports.BigtableTableAdminRestInterceptor, "pre_test_iam_permissions") as pre: - pre.assert_not_called() - post.assert_not_called() - pb_message = iam_policy_pb2.TestIamPermissionsRequest() - transcode.return_value = { - "method": "post", - "uri": "my_uri", - "body": pb_message, - "query_params": pb_message, - } - - req.return_value = Response() - req.return_value.status_code = 200 - req.return_value.request = PreparedRequest() - req.return_value._content = json_format.MessageToJson(iam_policy_pb2.TestIamPermissionsResponse()) - - request = iam_policy_pb2.TestIamPermissionsRequest() - metadata =[ - ("key", "val"), - ("cephalopod", "squid"), - ] - pre.return_value = request, metadata - post.return_value = iam_policy_pb2.TestIamPermissionsResponse() - - client.test_iam_permissions(request, metadata=[("key", "val"), ("cephalopod", "squid"),]) - - pre.assert_called_once() - post.assert_called_once() - - -def test_test_iam_permissions_rest_bad_request(transport: str = 'rest', request_type=iam_policy_pb2.TestIamPermissionsRequest): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # send a request that will satisfy transcoding - request_init = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} - request = request_type(**request_init) - - # Mock the http request call within the method and fake a BadRequest error. 
- with mock.patch.object(Session, 'request') as req, pytest.raises(core_exceptions.BadRequest): - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 400 - response_value.request = Request() - req.return_value = response_value - client.test_iam_permissions(request) - - -def test_test_iam_permissions_rest_flattened(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="rest", - ) - - # Mock the http request call within the method and fake a response. - with mock.patch.object(type(client.transport._session), 'request') as req: - # Designate an appropriate value for the returned response. - return_value = iam_policy_pb2.TestIamPermissionsResponse() - - # get arguments that satisfy an http rule for this method - sample_request = {'resource': 'projects/sample1/instances/sample2/tables/sample3'} - - # get truthy value for each flattened field - mock_args = dict( - resource='resource_value', - permissions=['permissions_value'], - ) - mock_args.update(sample_request) - - # Wrap the value into a proper Response obj - response_value = Response() - response_value.status_code = 200 - json_return_value = json_format.MessageToJson(return_value) - response_value._content = json_return_value.encode('UTF-8') - req.return_value = response_value - - client.test_iam_permissions(**mock_args) - - # Establish that the underlying call was made with the expected - # request object values. 
- assert len(req.mock_calls) == 1 - _, args, _ = req.mock_calls[0] - assert path_template.validate("%s/v2/{resource=projects/*/instances/*/tables/*}:testIamPermissions" % client.transport._host, args[1]) - - -def test_test_iam_permissions_rest_flattened_error(transport: str = 'rest'): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # Attempting to call a method with both a request object and flattened - # fields is an error. - with pytest.raises(ValueError): - client.test_iam_permissions( - iam_policy_pb2.TestIamPermissionsRequest(), - resource='resource_value', - permissions=['permissions_value'], - ) - - -def test_test_iam_permissions_rest_error(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest' - ) - - -def test_credentials_transport_error(): - # It is an error to provide credentials and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport, - ) - - # It is an error to provide a credentials file and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"credentials_file": "credentials.json"}, - transport=transport, - ) - - # It is an error to provide an api_key and a transport instance. 
- transport = transports.BigtableTableAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, - transport=transport, - ) - - # It is an error to provide an api_key and a credential. - options = client_options.ClientOptions() - options.api_key = "api_key" - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options=options, - credentials=_AnonymousCredentialsWithUniverseDomain() - ) - - # It is an error to provide scopes and a transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - with pytest.raises(ValueError): - client = BigtableTableAdminClient( - client_options={"scopes": ["1", "2"]}, - transport=transport, - ) - - -def test_transport_instance(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - client = BigtableTableAdminClient(transport=transport) - assert client.transport is transport - -def test_transport_get_channel(): - # A client may be instantiated with a custom transport instance. - transport = transports.BigtableTableAdminGrpcTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - channel = transport.grpc_channel - assert channel - - transport = transports.BigtableTableAdminGrpcAsyncIOTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - channel = transport.grpc_channel - assert channel - -@pytest.mark.parametrize("transport_class", [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminGrpcAsyncIOTransport, - transports.BigtableTableAdminRestTransport, -]) -def test_transport_adc(transport_class): - # Test default credentials are used if not provided. 
- with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport_class() - adc.assert_called_once() - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "rest", -]) -def test_transport_kind(transport_name): - transport = BigtableTableAdminClient.get_transport_class(transport_name)( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - assert transport.kind == transport_name - -def test_transport_grpc_default(): - # A client should use the gRPC transport by default. - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - assert isinstance( - client.transport, - transports.BigtableTableAdminGrpcTransport, - ) - -def test_bigtable_table_admin_base_transport_error(): - # Passing both a credentials object and credentials_file should raise an error - with pytest.raises(core_exceptions.DuplicateCredentialArgs): - transport = transports.BigtableTableAdminTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - credentials_file="credentials.json" - ) - - -def test_bigtable_table_admin_base_transport(): - # Instantiate the base transport. - with mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport.__init__') as Transport: - Transport.return_value = None - transport = transports.BigtableTableAdminTransport( - credentials=_AnonymousCredentialsWithUniverseDomain(), - ) - - # Every method on the transport should just blindly - # raise NotImplementedError. 
- methods = ( - 'create_table', - 'create_table_from_snapshot', - 'list_tables', - 'get_table', - 'update_table', - 'delete_table', - 'undelete_table', - 'modify_column_families', - 'drop_row_range', - 'generate_consistency_token', - 'check_consistency', - 'snapshot_table', - 'get_snapshot', - 'list_snapshots', - 'delete_snapshot', - 'create_backup', - 'get_backup', - 'update_backup', - 'delete_backup', - 'list_backups', - 'restore_table', - 'copy_backup', - 'get_iam_policy', - 'set_iam_policy', - 'test_iam_permissions', - ) - for method in methods: - with pytest.raises(NotImplementedError): - getattr(transport, method)(request=object()) - - with pytest.raises(NotImplementedError): - transport.close() - - # Additionally, the LRO client (a property) should - # also raise NotImplementedError - with pytest.raises(NotImplementedError): - transport.operations_client - - # Catch all for all remaining methods and properties - remainder = [ - 'kind', - ] - for r in remainder: - with pytest.raises(NotImplementedError): - getattr(transport, r)() - - -def test_bigtable_table_admin_base_transport_with_credentials_file(): - # Instantiate the base transport with a credentials file - with mock.patch.object(google.auth, 'load_credentials_from_file', autospec=True) as load_creds, mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - load_creds.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport = transports.BigtableTableAdminTransport( - credentials_file="credentials.json", - quota_project_id="octopus", - ) - load_creds.assert_called_once_with("credentials.json", - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', - 
'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - quota_project_id="octopus", - ) - - -def test_bigtable_table_admin_base_transport_with_adc(): - # Test the default credentials are used if credentials and credentials_file are None. - with mock.patch.object(google.auth, 'default', autospec=True) as adc, mock.patch('google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages') as Transport: - Transport.return_value = None - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport = transports.BigtableTableAdminTransport() - adc.assert_called_once() - - -def test_bigtable_table_admin_auth_adc(): - # If no credentials are provided, we should use ADC credentials. - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - BigtableTableAdminClient() - adc.assert_called_once_with( - scopes=None, - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - quota_project_id=None, - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminGrpcAsyncIOTransport, - ], -) -def test_bigtable_table_admin_transport_auth_adc(transport_class): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, 'default', autospec=True) as adc: - adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) - transport_class(quota_project_id="octopus", scopes=["1", "2"]) - adc.assert_called_once_with( - scopes=["1", "2"], - default_scopes=( 'https://www.googleapis.com/auth/bigtable.admin', 'https://www.googleapis.com/auth/bigtable.admin.table', 'https://www.googleapis.com/auth/cloud-bigtable.admin', 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', 'https://www.googleapis.com/auth/cloud-platform', 'https://www.googleapis.com/auth/cloud-platform.read-only',), - quota_project_id="octopus", - ) - - -@pytest.mark.parametrize( - "transport_class", - [ - transports.BigtableTableAdminGrpcTransport, - transports.BigtableTableAdminGrpcAsyncIOTransport, - transports.BigtableTableAdminRestTransport, - ], -) -def test_bigtable_table_admin_transport_auth_gdch_credentials(transport_class): - host = 'https://language.com' - api_audience_tests = [None, 'https://language2.com'] - api_audience_expect = [host, 'https://language2.com'] - for t, e in zip(api_audience_tests, api_audience_expect): - with mock.patch.object(google.auth, 'default', autospec=True) as adc: - gdch_mock = mock.MagicMock() - type(gdch_mock).with_gdch_audience = mock.PropertyMock(return_value=gdch_mock) - adc.return_value = (gdch_mock, None) - transport_class(host=host, api_audience=t) - gdch_mock.with_gdch_audience.assert_called_once_with( - e - ) - - -@pytest.mark.parametrize( - "transport_class,grpc_helpers", - [ - (transports.BigtableTableAdminGrpcTransport, grpc_helpers), - (transports.BigtableTableAdminGrpcAsyncIOTransport, grpc_helpers_async) - ], -) -def test_bigtable_table_admin_transport_create_channel(transport_class, grpc_helpers): - # If credentials and host are not provided, the transport class should use - # ADC credentials. 
- with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch.object( - grpc_helpers, "create_channel", autospec=True - ) as create_channel: - creds = _AnonymousCredentialsWithUniverseDomain() - adc.return_value = (creds, None) - transport_class( - quota_project_id="octopus", - scopes=["1", "2"] - ) - - create_channel.assert_called_with( - "bigtableadmin.googleapis.com:443", - credentials=creds, - credentials_file=None, - quota_project_id="octopus", - default_scopes=( - 'https://www.googleapis.com/auth/bigtable.admin', - 'https://www.googleapis.com/auth/bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-bigtable.admin', - 'https://www.googleapis.com/auth/cloud-bigtable.admin.table', - 'https://www.googleapis.com/auth/cloud-platform', - 'https://www.googleapis.com/auth/cloud-platform.read-only', -), - scopes=["1", "2"], - default_host="bigtableadmin.googleapis.com", - ssl_credentials=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - -@pytest.mark.parametrize("transport_class", [transports.BigtableTableAdminGrpcTransport, transports.BigtableTableAdminGrpcAsyncIOTransport]) -def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( - transport_class -): - cred = _AnonymousCredentialsWithUniverseDomain() - - # Check ssl_channel_credentials is used if provided. 
- with mock.patch.object(transport_class, "create_channel") as mock_create_channel: - mock_ssl_channel_creds = mock.Mock() - transport_class( - host="squid.clam.whelk", - credentials=cred, - ssl_channel_credentials=mock_ssl_channel_creds - ) - mock_create_channel.assert_called_once_with( - "squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_channel_creds, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - - # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls - # is used. - with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()): - with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred: - transport_class( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - expected_cert, expected_key = client_cert_source_callback() - mock_ssl_cred.assert_called_once_with( - certificate_chain=expected_cert, - private_key=expected_key - ) - -def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls(): - cred = _AnonymousCredentialsWithUniverseDomain() - with mock.patch("google.auth.transport.requests.AuthorizedSession.configure_mtls_channel") as mock_configure_mtls_channel: - transports.BigtableTableAdminRestTransport ( - credentials=cred, - client_cert_source_for_mtls=client_cert_source_callback - ) - mock_configure_mtls_channel.assert_called_once_with(client_cert_source_callback) - - -def test_bigtable_table_admin_rest_lro_client(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='rest', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.AbstractOperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_bigtable_table_admin_host_no_port(transport_name): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_options=client_options.ClientOptions(api_endpoint='bigtableadmin.googleapis.com'), - transport=transport_name, - ) - assert client.transport._host == ( - 'bigtableadmin.googleapis.com:443' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://bigtableadmin.googleapis.com' - ) - -@pytest.mark.parametrize("transport_name", [ - "grpc", - "grpc_asyncio", - "rest", -]) -def test_bigtable_table_admin_host_with_port(transport_name): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_options=client_options.ClientOptions(api_endpoint='bigtableadmin.googleapis.com:8000'), - transport=transport_name, - ) - assert client.transport._host == ( - 'bigtableadmin.googleapis.com:8000' - if transport_name in ['grpc', 'grpc_asyncio'] - else 'https://bigtableadmin.googleapis.com:8000' - ) - -@pytest.mark.parametrize("transport_name", [ - "rest", -]) -def test_bigtable_table_admin_client_transport_session_collision(transport_name): - creds1 = _AnonymousCredentialsWithUniverseDomain() - creds2 = _AnonymousCredentialsWithUniverseDomain() - client1 = BigtableTableAdminClient( - credentials=creds1, - transport=transport_name, - ) - client2 = BigtableTableAdminClient( - credentials=creds2, - transport=transport_name, - ) - session1 = client1.transport.create_table._session - session2 = client2.transport.create_table._session - assert session1 != session2 - session1 = client1.transport.create_table_from_snapshot._session - session2 = client2.transport.create_table_from_snapshot._session - assert session1 != session2 - session1 = client1.transport.list_tables._session - session2 = 
client2.transport.list_tables._session - assert session1 != session2 - session1 = client1.transport.get_table._session - session2 = client2.transport.get_table._session - assert session1 != session2 - session1 = client1.transport.update_table._session - session2 = client2.transport.update_table._session - assert session1 != session2 - session1 = client1.transport.delete_table._session - session2 = client2.transport.delete_table._session - assert session1 != session2 - session1 = client1.transport.undelete_table._session - session2 = client2.transport.undelete_table._session - assert session1 != session2 - session1 = client1.transport.modify_column_families._session - session2 = client2.transport.modify_column_families._session - assert session1 != session2 - session1 = client1.transport.drop_row_range._session - session2 = client2.transport.drop_row_range._session - assert session1 != session2 - session1 = client1.transport.generate_consistency_token._session - session2 = client2.transport.generate_consistency_token._session - assert session1 != session2 - session1 = client1.transport.check_consistency._session - session2 = client2.transport.check_consistency._session - assert session1 != session2 - session1 = client1.transport.snapshot_table._session - session2 = client2.transport.snapshot_table._session - assert session1 != session2 - session1 = client1.transport.get_snapshot._session - session2 = client2.transport.get_snapshot._session - assert session1 != session2 - session1 = client1.transport.list_snapshots._session - session2 = client2.transport.list_snapshots._session - assert session1 != session2 - session1 = client1.transport.delete_snapshot._session - session2 = client2.transport.delete_snapshot._session - assert session1 != session2 - session1 = client1.transport.create_backup._session - session2 = client2.transport.create_backup._session - assert session1 != session2 - session1 = client1.transport.get_backup._session - session2 = 
client2.transport.get_backup._session - assert session1 != session2 - session1 = client1.transport.update_backup._session - session2 = client2.transport.update_backup._session - assert session1 != session2 - session1 = client1.transport.delete_backup._session - session2 = client2.transport.delete_backup._session - assert session1 != session2 - session1 = client1.transport.list_backups._session - session2 = client2.transport.list_backups._session - assert session1 != session2 - session1 = client1.transport.restore_table._session - session2 = client2.transport.restore_table._session - assert session1 != session2 - session1 = client1.transport.copy_backup._session - session2 = client2.transport.copy_backup._session - assert session1 != session2 - session1 = client1.transport.get_iam_policy._session - session2 = client2.transport.get_iam_policy._session - assert session1 != session2 - session1 = client1.transport.set_iam_policy._session - session2 = client2.transport.set_iam_policy._session - assert session1 != session2 - session1 = client1.transport.test_iam_permissions._session - session2 = client2.transport.test_iam_permissions._session - assert session1 != session2 -def test_bigtable_table_admin_grpc_transport_channel(): - channel = grpc.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. - transport = transports.BigtableTableAdminGrpcTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -def test_bigtable_table_admin_grpc_asyncio_transport_channel(): - channel = aio.secure_channel('http://localhost/', grpc.local_channel_credentials()) - - # Check that channel is used if provided. 
- transport = transports.BigtableTableAdminGrpcAsyncIOTransport( - host="squid.clam.whelk", - channel=channel, - ) - assert transport.grpc_channel == channel - assert transport._host == "squid.clam.whelk:443" - assert transport._ssl_channel_credentials == None - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.BigtableTableAdminGrpcTransport, transports.BigtableTableAdminGrpcAsyncIOTransport]) -def test_bigtable_table_admin_transport_channel_mtls_with_client_cert_source( - transport_class -): - with mock.patch("grpc.ssl_channel_credentials", autospec=True) as grpc_ssl_channel_cred: - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_ssl_cred = mock.Mock() - grpc_ssl_channel_cred.return_value = mock_ssl_cred - - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - - cred = _AnonymousCredentialsWithUniverseDomain() - with pytest.warns(DeprecationWarning): - with mock.patch.object(google.auth, 'default') as adc: - adc.return_value = (cred, None) - transport = transport_class( - host="squid.clam.whelk", - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=client_cert_source_callback, - ) - adc.assert_called_once() - - grpc_ssl_channel_cred.assert_called_once_with( - certificate_chain=b"cert bytes", private_key=b"key bytes" - ) - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - assert transport._ssl_channel_credentials == mock_ssl_cred - - -# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are -# 
removed from grpc/grpc_asyncio transport constructor. -@pytest.mark.parametrize("transport_class", [transports.BigtableTableAdminGrpcTransport, transports.BigtableTableAdminGrpcAsyncIOTransport]) -def test_bigtable_table_admin_transport_channel_mtls_with_adc( - transport_class -): - mock_ssl_cred = mock.Mock() - with mock.patch.multiple( - "google.auth.transport.grpc.SslCredentials", - __init__=mock.Mock(return_value=None), - ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred), - ): - with mock.patch.object(transport_class, "create_channel") as grpc_create_channel: - mock_grpc_channel = mock.Mock() - grpc_create_channel.return_value = mock_grpc_channel - mock_cred = mock.Mock() - - with pytest.warns(DeprecationWarning): - transport = transport_class( - host="squid.clam.whelk", - credentials=mock_cred, - api_mtls_endpoint="mtls.squid.clam.whelk", - client_cert_source=None, - ) - - grpc_create_channel.assert_called_once_with( - "mtls.squid.clam.whelk:443", - credentials=mock_cred, - credentials_file=None, - scopes=None, - ssl_credentials=mock_ssl_cred, - quota_project_id=None, - options=[ - ("grpc.max_send_message_length", -1), - ("grpc.max_receive_message_length", -1), - ], - ) - assert transport.grpc_channel == mock_grpc_channel - - -def test_bigtable_table_admin_grpc_lro_client(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. 
- assert transport.operations_client is transport.operations_client - - -def test_bigtable_table_admin_grpc_lro_async_client(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport='grpc_asyncio', - ) - transport = client.transport - - # Ensure that we have a api-core operations client. - assert isinstance( - transport.operations_client, - operations_v1.OperationsAsyncClient, - ) - - # Ensure that subsequent calls to the property send the exact same object. - assert transport.operations_client is transport.operations_client - - -def test_backup_path(): - project = "squid" - instance = "clam" - cluster = "whelk" - backup = "octopus" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}".format(project=project, instance=instance, cluster=cluster, backup=backup, ) - actual = BigtableTableAdminClient.backup_path(project, instance, cluster, backup) - assert expected == actual - - -def test_parse_backup_path(): - expected = { - "project": "oyster", - "instance": "nudibranch", - "cluster": "cuttlefish", - "backup": "mussel", - } - path = BigtableTableAdminClient.backup_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_backup_path(path) - assert expected == actual - -def test_cluster_path(): - project = "winkle" - instance = "nautilus" - cluster = "scallop" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}".format(project=project, instance=instance, cluster=cluster, ) - actual = BigtableTableAdminClient.cluster_path(project, instance, cluster) - assert expected == actual - - -def test_parse_cluster_path(): - expected = { - "project": "abalone", - "instance": "squid", - "cluster": "clam", - } - path = BigtableTableAdminClient.cluster_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableTableAdminClient.parse_cluster_path(path) - assert expected == actual - -def test_crypto_key_version_path(): - project = "whelk" - location = "octopus" - key_ring = "oyster" - crypto_key = "nudibranch" - crypto_key_version = "cuttlefish" - expected = "projects/{project}/locations/{location}/keyRings/{key_ring}/cryptoKeys/{crypto_key}/cryptoKeyVersions/{crypto_key_version}".format(project=project, location=location, key_ring=key_ring, crypto_key=crypto_key, crypto_key_version=crypto_key_version, ) - actual = BigtableTableAdminClient.crypto_key_version_path(project, location, key_ring, crypto_key, crypto_key_version) - assert expected == actual - - -def test_parse_crypto_key_version_path(): - expected = { - "project": "mussel", - "location": "winkle", - "key_ring": "nautilus", - "crypto_key": "scallop", - "crypto_key_version": "abalone", - } - path = BigtableTableAdminClient.crypto_key_version_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_crypto_key_version_path(path) - assert expected == actual - -def test_instance_path(): - project = "squid" - instance = "clam" - expected = "projects/{project}/instances/{instance}".format(project=project, instance=instance, ) - actual = BigtableTableAdminClient.instance_path(project, instance) - assert expected == actual - - -def test_parse_instance_path(): - expected = { - "project": "whelk", - "instance": "octopus", - } - path = BigtableTableAdminClient.instance_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableTableAdminClient.parse_instance_path(path) - assert expected == actual - -def test_snapshot_path(): - project = "oyster" - instance = "nudibranch" - cluster = "cuttlefish" - snapshot = "mussel" - expected = "projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}".format(project=project, instance=instance, cluster=cluster, snapshot=snapshot, ) - actual = BigtableTableAdminClient.snapshot_path(project, instance, cluster, snapshot) - assert expected == actual - - -def test_parse_snapshot_path(): - expected = { - "project": "winkle", - "instance": "nautilus", - "cluster": "scallop", - "snapshot": "abalone", - } - path = BigtableTableAdminClient.snapshot_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_snapshot_path(path) - assert expected == actual - -def test_table_path(): - project = "squid" - instance = "clam" - table = "whelk" - expected = "projects/{project}/instances/{instance}/tables/{table}".format(project=project, instance=instance, table=table, ) - actual = BigtableTableAdminClient.table_path(project, instance, table) - assert expected == actual - - -def test_parse_table_path(): - expected = { - "project": "octopus", - "instance": "oyster", - "table": "nudibranch", - } - path = BigtableTableAdminClient.table_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableTableAdminClient.parse_table_path(path) - assert expected == actual - -def test_common_billing_account_path(): - billing_account = "cuttlefish" - expected = "billingAccounts/{billing_account}".format(billing_account=billing_account, ) - actual = BigtableTableAdminClient.common_billing_account_path(billing_account) - assert expected == actual - - -def test_parse_common_billing_account_path(): - expected = { - "billing_account": "mussel", - } - path = BigtableTableAdminClient.common_billing_account_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_common_billing_account_path(path) - assert expected == actual - -def test_common_folder_path(): - folder = "winkle" - expected = "folders/{folder}".format(folder=folder, ) - actual = BigtableTableAdminClient.common_folder_path(folder) - assert expected == actual - - -def test_parse_common_folder_path(): - expected = { - "folder": "nautilus", - } - path = BigtableTableAdminClient.common_folder_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_common_folder_path(path) - assert expected == actual - -def test_common_organization_path(): - organization = "scallop" - expected = "organizations/{organization}".format(organization=organization, ) - actual = BigtableTableAdminClient.common_organization_path(organization) - assert expected == actual - - -def test_parse_common_organization_path(): - expected = { - "organization": "abalone", - } - path = BigtableTableAdminClient.common_organization_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableTableAdminClient.parse_common_organization_path(path) - assert expected == actual - -def test_common_project_path(): - project = "squid" - expected = "projects/{project}".format(project=project, ) - actual = BigtableTableAdminClient.common_project_path(project) - assert expected == actual - - -def test_parse_common_project_path(): - expected = { - "project": "clam", - } - path = BigtableTableAdminClient.common_project_path(**expected) - - # Check that the path construction is reversible. - actual = BigtableTableAdminClient.parse_common_project_path(path) - assert expected == actual - -def test_common_location_path(): - project = "whelk" - location = "octopus" - expected = "projects/{project}/locations/{location}".format(project=project, location=location, ) - actual = BigtableTableAdminClient.common_location_path(project, location) - assert expected == actual - - -def test_parse_common_location_path(): - expected = { - "project": "oyster", - "location": "nudibranch", - } - path = BigtableTableAdminClient.common_location_path(**expected) - - # Check that the path construction is reversible. 
- actual = BigtableTableAdminClient.parse_common_location_path(path) - assert expected == actual - - -def test_client_with_default_client_info(): - client_info = gapic_v1.client_info.ClientInfo() - - with mock.patch.object(transports.BigtableTableAdminTransport, '_prep_wrapped_messages') as prep: - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - - with mock.patch.object(transports.BigtableTableAdminTransport, '_prep_wrapped_messages') as prep: - transport_class = BigtableTableAdminClient.get_transport_class() - transport = transport_class( - credentials=_AnonymousCredentialsWithUniverseDomain(), - client_info=client_info, - ) - prep.assert_called_once_with(client_info) - -@pytest.mark.asyncio -async def test_transport_close_async(): - client = BigtableTableAdminAsyncClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport="grpc_asyncio", - ) - with mock.patch.object(type(getattr(client.transport, "grpc_channel")), "close") as close: - async with client: - close.assert_not_called() - close.assert_called_once() - - -def test_transport_close(): - transports = { - "rest": "_session", - "grpc": "_grpc_channel", - } - - for transport, close_name in transports.items(): - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport - ) - with mock.patch.object(type(getattr(client.transport, close_name)), "close") as close: - with client: - close.assert_not_called() - close.assert_called_once() - -def test_client_ctx(): - transports = [ - 'rest', - 'grpc', - ] - for transport in transports: - client = BigtableTableAdminClient( - credentials=_AnonymousCredentialsWithUniverseDomain(), - transport=transport - ) - # Test client calls underlying transport. 
- with mock.patch.object(type(client.transport), "close") as close: - close.assert_not_called() - with client: - pass - close.assert_called() - -@pytest.mark.parametrize("client_class,transport_class", [ - (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport), - (BigtableTableAdminAsyncClient, transports.BigtableTableAdminGrpcAsyncIOTransport), -]) -def test_api_key_credentials(client_class, transport_class): - with mock.patch.object( - google.auth._default, "get_api_key_credentials", create=True - ) as get_api_key_credentials: - mock_cred = mock.Mock() - get_api_key_credentials.return_value = mock_cred - options = client_options.ClientOptions() - options.api_key = "api_key" - with mock.patch.object(transport_class, "__init__") as patched: - patched.return_value = None - client = client_class(client_options=options) - patched.assert_called_once_with( - credentials=mock_cred, - credentials_file=None, - host=client._DEFAULT_ENDPOINT_TEMPLATE.format(UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE), - scopes=None, - client_cert_source_for_mtls=None, - quota_project_id=None, - client_info=transports.base.DEFAULT_CLIENT_INFO, - always_use_jwt_access=True, - api_audience=None, - ) diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py index ddbf0032f..c1411e306 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_instance_admin.py @@ -29,6 +29,7 @@ import json import math import pytest +from google.api_core import api_core_version from proto.marshal.rules.dates import DurationRule, TimestampRule from proto.marshal.rules import wrappers from requests import Response @@ -86,6 +87,29 @@ def modify_default_endpoint(client): ) +# If default endpoint template is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +# Anonymous Credentials with universe domain property. If no universe domain is provided, then +# the default universe domain is "googleapis.com". +class _AnonymousCredentialsWithUniverseDomain(ga_credentials.AnonymousCredentials): + def __init__(self, universe_domain="googleapis.com"): + super(_AnonymousCredentialsWithUniverseDomain, self).__init__() + self._universe_domain = universe_domain + + @property + def universe_domain(self): + return self._universe_domain + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -116,6 +140,291 @@ def test__get_default_mtls_endpoint(): ) +def test__read_environment_variables(): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + BigtableInstanceAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert 
BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableInstanceAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert BigtableInstanceAdminClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert BigtableInstanceAdminClient._get_client_cert_source(None, False) is None + assert ( + BigtableInstanceAdminClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + BigtableInstanceAdminClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + BigtableInstanceAdminClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + BigtableInstanceAdminClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + BigtableInstanceAdminClient, + 
"_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, default_universe, "auto" + ) + == default_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == BigtableInstanceAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, mock_universe, "never" + ) + == mock_endpoint + ) + assert ( + BigtableInstanceAdminClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableInstanceAdminClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any 
universe other than googleapis.com." + ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + BigtableInstanceAdminClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + BigtableInstanceAdminClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + BigtableInstanceAdminClient._get_universe_domain(None, None) + == BigtableInstanceAdminClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + BigtableInstanceAdminClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminGrpcTransport, + "grpc", + ), + ( + BigtableInstanceAdminClient, + transports.BigtableInstanceAdminRestTransport, + "rest", + ), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. 
+ channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # Test the case when there is a universe mismatch from the credentials. + client = client_class( + transport=transport_class( + credentials=_AnonymousCredentialsWithUniverseDomain( + universe_domain="foo.com" + ) + ) + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor, _ = [ + int(part) for part in api_core_version.__version__.split(".") + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=_AnonymousCredentialsWithUniverseDomain(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
+ ) + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -127,7 +436,7 @@ def test__get_default_mtls_endpoint(): def test_bigtable_instance_admin_client_from_service_account_info( client_class, transport_name ): - creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: @@ -181,7 +490,7 @@ def test_bigtable_instance_admin_client_service_account_always_use_jwt( def test_bigtable_instance_admin_client_from_service_account_file( client_class, transport_name ): - creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: @@ -239,20 +548,22 @@ def test_bigtable_instance_admin_client_get_transport_class(): ) @mock.patch.object( BigtableInstanceAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), ) @mock.patch.object( BigtableInstanceAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), ) def test_bigtable_instance_admin_client_client_options( client_class, transport_class, transport_name ): # Check that if channel is provided we won't create a new one. 
with mock.patch.object(BigtableInstanceAdminClient, "get_transport_class") as gtc: - transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + transport = transport_class( + credentials=_AnonymousCredentialsWithUniverseDomain() + ) client = client_class(transport=transport) gtc.assert_not_called() @@ -287,7 +598,9 @@ def test_bigtable_instance_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -317,15 +630,23 @@ def test_bigtable_instance_admin_client_client_options( # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): + with pytest.raises(MutualTLSChannelError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): - with pytest.raises(ValueError): + with pytest.raises(ValueError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") @@ -335,7 +656,9 @@ def test_bigtable_instance_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", @@ -353,7 +676,9 @@ def test_bigtable_instance_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -406,13 +731,13 @@ def test_bigtable_instance_admin_client_client_options( ) @mock.patch.object( BigtableInstanceAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), ) @mock.patch.object( BigtableInstanceAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableInstanceAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_bigtable_instance_admin_client_mtls_env_auto( @@ -435,7 +760,9 @@ def test_bigtable_instance_admin_client_mtls_env_auto( if use_client_cert_env == "false": expected_client_cert_source = None - expected_host 
= client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -467,7 +794,9 @@ def test_bigtable_instance_admin_client_mtls_env_auto( return_value=client_cert_source_callback, ): if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -501,7 +830,9 @@ def test_bigtable_instance_admin_client_mtls_env_auto( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -591,6 +922,118 @@ def test_bigtable_instance_admin_client_get_mtls_endpoint_and_cert_source(client assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [BigtableInstanceAdminClient, BigtableInstanceAdminAsyncClient] +) +@mock.patch.object( + BigtableInstanceAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminClient), +) +@mock.patch.object( + BigtableInstanceAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableInstanceAdminAsyncClient), +) +def test_bigtable_instance_admin_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = BigtableInstanceAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableInstanceAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + else: + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + assert client.api_endpoint == default_endpoint + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -625,7 +1068,9 @@ def test_bigtable_instance_admin_client_client_options_scopes( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, @@ -670,7 +1115,9 @@ def test_bigtable_instance_admin_client_client_options_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -730,7 
+1177,9 @@ def test_bigtable_instance_admin_client_create_channel_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -747,8 +1196,8 @@ def test_bigtable_instance_admin_client_create_channel_credentials_file( ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - file_creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() + file_creds = _AnonymousCredentialsWithUniverseDomain() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = client_class(client_options=options, transport=transport_name) @@ -785,7 +1234,7 @@ def test_bigtable_instance_admin_client_create_channel_credentials_file( ) def test_create_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -812,7 +1261,7 @@ def test_create_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -830,7 +1279,7 @@ async def test_create_instance_async( request_type=bigtable_instance_admin.CreateInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -862,7 +1311,7 @@ async def test_create_instance_async_from_dict(): def test_create_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -892,7 +1341,7 @@ def test_create_instance_field_headers(): @pytest.mark.asyncio async def test_create_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -923,7 +1372,7 @@ async def test_create_instance_field_headers_async(): def test_create_instance_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -959,7 +1408,7 @@ def test_create_instance_flattened(): def test_create_instance_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -977,7 +1426,7 @@ def test_create_instance_flattened_error(): @pytest.mark.asyncio async def test_create_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1018,7 +1467,7 @@ async def test_create_instance_flattened_async(): @pytest.mark.asyncio async def test_create_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1042,7 +1491,7 @@ async def test_create_instance_flattened_error_async(): ) def test_get_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1080,7 +1529,7 @@ def test_get_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1098,7 +1547,7 @@ async def test_get_instance_async( request_type=bigtable_instance_admin.GetInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1141,7 +1590,7 @@ async def test_get_instance_async_from_dict(): def test_get_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1171,7 +1620,7 @@ def test_get_instance_field_headers(): @pytest.mark.asyncio async def test_get_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1200,7 +1649,7 @@ async def test_get_instance_field_headers_async(): def test_get_instance_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1224,7 +1673,7 @@ def test_get_instance_flattened(): def test_get_instance_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1239,7 +1688,7 @@ def test_get_instance_flattened_error(): @pytest.mark.asyncio async def test_get_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1266,7 +1715,7 @@ async def test_get_instance_flattened_async(): @pytest.mark.asyncio async def test_get_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1287,7 +1736,7 @@ async def test_get_instance_flattened_error_async(): ) def test_list_instances(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1320,7 +1769,7 @@ def test_list_instances_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1338,7 +1787,7 @@ async def test_list_instances_async( request_type=bigtable_instance_admin.ListInstancesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1375,7 +1824,7 @@ async def test_list_instances_async_from_dict(): def test_list_instances_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1405,7 +1854,7 @@ def test_list_instances_field_headers(): @pytest.mark.asyncio async def test_list_instances_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1436,7 +1885,7 @@ async def test_list_instances_field_headers_async(): def test_list_instances_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1460,7 +1909,7 @@ def test_list_instances_flattened(): def test_list_instances_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1475,7 +1924,7 @@ def test_list_instances_flattened_error(): @pytest.mark.asyncio async def test_list_instances_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1504,7 +1953,7 @@ async def test_list_instances_flattened_async(): @pytest.mark.asyncio async def test_list_instances_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1525,7 +1974,7 @@ async def test_list_instances_flattened_error_async(): ) def test_update_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1563,7 +2012,7 @@ def test_update_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1580,7 +2029,7 @@ async def test_update_instance_async( transport: str = "grpc_asyncio", request_type=instance.Instance ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1623,7 +2072,7 @@ async def test_update_instance_async_from_dict(): def test_update_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1653,7 +2102,7 @@ def test_update_instance_field_headers(): @pytest.mark.asyncio async def test_update_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1689,7 +2138,7 @@ async def test_update_instance_field_headers_async(): ) def test_partial_update_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1718,7 +2167,7 @@ def test_partial_update_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1738,7 +2187,7 @@ async def test_partial_update_instance_async( request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1772,7 +2221,7 @@ async def test_partial_update_instance_async_from_dict(): def test_partial_update_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1804,7 +2253,7 @@ def test_partial_update_instance_field_headers(): @pytest.mark.asyncio async def test_partial_update_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1837,7 +2286,7 @@ async def test_partial_update_instance_field_headers_async(): def test_partial_update_instance_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1867,7 +2316,7 @@ def test_partial_update_instance_flattened(): def test_partial_update_instance_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1883,7 +2332,7 @@ def test_partial_update_instance_flattened_error(): @pytest.mark.asyncio async def test_partial_update_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1918,7 +2367,7 @@ async def test_partial_update_instance_flattened_async(): @pytest.mark.asyncio async def test_partial_update_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1940,7 +2389,7 @@ async def test_partial_update_instance_flattened_error_async(): ) def test_delete_instance(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1967,7 +2416,7 @@ def test_delete_instance_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1985,7 +2434,7 @@ async def test_delete_instance_async( request_type=bigtable_instance_admin.DeleteInstanceRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2015,7 +2464,7 @@ async def test_delete_instance_async_from_dict(): def test_delete_instance_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2045,7 +2494,7 @@ def test_delete_instance_field_headers(): @pytest.mark.asyncio async def test_delete_instance_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2074,7 +2523,7 @@ async def test_delete_instance_field_headers_async(): def test_delete_instance_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2098,7 +2547,7 @@ def test_delete_instance_flattened(): def test_delete_instance_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2113,7 +2562,7 @@ def test_delete_instance_flattened_error(): @pytest.mark.asyncio async def test_delete_instance_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2140,7 +2589,7 @@ async def test_delete_instance_flattened_async(): @pytest.mark.asyncio async def test_delete_instance_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2161,7 +2610,7 @@ async def test_delete_instance_flattened_error_async(): ) def test_create_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2188,7 +2637,7 @@ def test_create_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2206,7 +2655,7 @@ async def test_create_cluster_async( request_type=bigtable_instance_admin.CreateClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2238,7 +2687,7 @@ async def test_create_cluster_async_from_dict(): def test_create_cluster_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2268,7 +2717,7 @@ def test_create_cluster_field_headers(): @pytest.mark.asyncio async def test_create_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2299,7 +2748,7 @@ async def test_create_cluster_field_headers_async(): def test_create_cluster_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2331,7 +2780,7 @@ def test_create_cluster_flattened(): def test_create_cluster_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2348,7 +2797,7 @@ def test_create_cluster_flattened_error(): @pytest.mark.asyncio async def test_create_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2385,7 +2834,7 @@ async def test_create_cluster_flattened_async(): @pytest.mark.asyncio async def test_create_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2408,7 +2857,7 @@ async def test_create_cluster_flattened_error_async(): ) def test_get_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2446,7 +2895,7 @@ def test_get_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2464,7 +2913,7 @@ async def test_get_cluster_async( request_type=bigtable_instance_admin.GetClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2507,7 +2956,7 @@ async def test_get_cluster_async_from_dict(): def test_get_cluster_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2537,7 +2986,7 @@ def test_get_cluster_field_headers(): @pytest.mark.asyncio async def test_get_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2566,7 +3015,7 @@ async def test_get_cluster_field_headers_async(): def test_get_cluster_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2590,7 +3039,7 @@ def test_get_cluster_flattened(): def test_get_cluster_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2605,7 +3054,7 @@ def test_get_cluster_flattened_error(): @pytest.mark.asyncio async def test_get_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2632,7 +3081,7 @@ async def test_get_cluster_flattened_async(): @pytest.mark.asyncio async def test_get_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2653,7 +3102,7 @@ async def test_get_cluster_flattened_error_async(): ) def test_list_clusters(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2686,7 +3135,7 @@ def test_list_clusters_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2704,7 +3153,7 @@ async def test_list_clusters_async( request_type=bigtable_instance_admin.ListClustersRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2741,7 +3190,7 @@ async def test_list_clusters_async_from_dict(): def test_list_clusters_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2771,7 +3220,7 @@ def test_list_clusters_field_headers(): @pytest.mark.asyncio async def test_list_clusters_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2802,7 +3251,7 @@ async def test_list_clusters_field_headers_async(): def test_list_clusters_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2826,7 +3275,7 @@ def test_list_clusters_flattened(): def test_list_clusters_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2841,7 +3290,7 @@ def test_list_clusters_flattened_error(): @pytest.mark.asyncio async def test_list_clusters_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2870,7 +3319,7 @@ async def test_list_clusters_flattened_async(): @pytest.mark.asyncio async def test_list_clusters_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2891,7 +3340,7 @@ async def test_list_clusters_flattened_error_async(): ) def test_update_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2918,7 +3367,7 @@ def test_update_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2935,7 +3384,7 @@ async def test_update_cluster_async( transport: str = "grpc_asyncio", request_type=instance.Cluster ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2967,7 +3416,7 @@ async def test_update_cluster_async_from_dict(): def test_update_cluster_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2997,7 +3446,7 @@ def test_update_cluster_field_headers(): @pytest.mark.asyncio async def test_update_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3035,7 +3484,7 @@ async def test_update_cluster_field_headers_async(): ) def test_partial_update_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3064,7 +3513,7 @@ def test_partial_update_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -3084,7 +3533,7 @@ async def test_partial_update_cluster_async( request_type=bigtable_instance_admin.PartialUpdateClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3118,7 +3567,7 @@ async def test_partial_update_cluster_async_from_dict(): def test_partial_update_cluster_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3150,7 +3599,7 @@ def test_partial_update_cluster_field_headers(): @pytest.mark.asyncio async def test_partial_update_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3183,7 +3632,7 @@ async def test_partial_update_cluster_field_headers_async(): def test_partial_update_cluster_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3213,7 +3662,7 @@ def test_partial_update_cluster_flattened(): def test_partial_update_cluster_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3229,7 +3678,7 @@ def test_partial_update_cluster_flattened_error(): @pytest.mark.asyncio async def test_partial_update_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3264,7 +3713,7 @@ async def test_partial_update_cluster_flattened_async(): @pytest.mark.asyncio async def test_partial_update_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3286,7 +3735,7 @@ async def test_partial_update_cluster_flattened_error_async(): ) def test_delete_cluster(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3313,7 +3762,7 @@ def test_delete_cluster_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -3331,7 +3780,7 @@ async def test_delete_cluster_async( request_type=bigtable_instance_admin.DeleteClusterRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3361,7 +3810,7 @@ async def test_delete_cluster_async_from_dict(): def test_delete_cluster_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3391,7 +3840,7 @@ def test_delete_cluster_field_headers(): @pytest.mark.asyncio async def test_delete_cluster_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3420,7 +3869,7 @@ async def test_delete_cluster_field_headers_async(): def test_delete_cluster_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3444,7 +3893,7 @@ def test_delete_cluster_flattened(): def test_delete_cluster_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3459,7 +3908,7 @@ def test_delete_cluster_flattened_error(): @pytest.mark.asyncio async def test_delete_cluster_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3486,7 +3935,7 @@ async def test_delete_cluster_flattened_async(): @pytest.mark.asyncio async def test_delete_cluster_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3507,7 +3956,7 @@ async def test_delete_cluster_flattened_error_async(): ) def test_create_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3544,7 +3993,7 @@ def test_create_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -3564,7 +4013,7 @@ async def test_create_app_profile_async( request_type=bigtable_instance_admin.CreateAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3605,7 +4054,7 @@ async def test_create_app_profile_async_from_dict(): def test_create_app_profile_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3637,7 +4086,7 @@ def test_create_app_profile_field_headers(): @pytest.mark.asyncio async def test_create_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3668,7 +4117,7 @@ async def test_create_app_profile_field_headers_async(): def test_create_app_profile_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3702,7 +4151,7 @@ def test_create_app_profile_flattened(): def test_create_app_profile_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3719,7 +4168,7 @@ def test_create_app_profile_flattened_error(): @pytest.mark.asyncio async def test_create_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3756,7 +4205,7 @@ async def test_create_app_profile_flattened_async(): @pytest.mark.asyncio async def test_create_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3779,7 +4228,7 @@ async def test_create_app_profile_flattened_error_async(): ) def test_get_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3814,7 +4263,7 @@ def test_get_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -3832,7 +4281,7 @@ async def test_get_app_profile_async( request_type=bigtable_instance_admin.GetAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3871,7 +4320,7 @@ async def test_get_app_profile_async_from_dict(): def test_get_app_profile_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3901,7 +4350,7 @@ def test_get_app_profile_field_headers(): @pytest.mark.asyncio async def test_get_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3930,7 +4379,7 @@ async def test_get_app_profile_field_headers_async(): def test_get_app_profile_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3954,7 +4403,7 @@ def test_get_app_profile_flattened(): def test_get_app_profile_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3969,7 +4418,7 @@ def test_get_app_profile_flattened_error(): @pytest.mark.asyncio async def test_get_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3996,7 +4445,7 @@ async def test_get_app_profile_flattened_async(): @pytest.mark.asyncio async def test_get_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4017,7 +4466,7 @@ async def test_get_app_profile_flattened_error_async(): ) def test_list_app_profiles(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4051,7 +4500,7 @@ def test_list_app_profiles_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -4071,7 +4520,7 @@ async def test_list_app_profiles_async( request_type=bigtable_instance_admin.ListAppProfilesRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4110,7 +4559,7 @@ async def test_list_app_profiles_async_from_dict(): def test_list_app_profiles_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4142,7 +4591,7 @@ def test_list_app_profiles_field_headers(): @pytest.mark.asyncio async def test_list_app_profiles_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4175,7 +4624,7 @@ async def test_list_app_profiles_field_headers_async(): def test_list_app_profiles_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4201,7 +4650,7 @@ def test_list_app_profiles_flattened(): def test_list_app_profiles_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4216,7 +4665,7 @@ def test_list_app_profiles_flattened_error(): @pytest.mark.asyncio async def test_list_app_profiles_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4247,7 +4696,7 @@ async def test_list_app_profiles_flattened_async(): @pytest.mark.asyncio async def test_list_app_profiles_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4261,7 +4710,7 @@ async def test_list_app_profiles_flattened_error_async(): def test_list_app_profiles_pager(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -4313,7 +4762,7 @@ def test_list_app_profiles_pager(transport_name: str = "grpc"): def test_list_app_profiles_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -4357,7 +4806,7 @@ def test_list_app_profiles_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_app_profiles_async_pager(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + 
credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4409,7 +4858,7 @@ async def test_list_app_profiles_async_pager(): @pytest.mark.asyncio async def test_list_app_profiles_async_pages(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4466,7 +4915,7 @@ async def test_list_app_profiles_async_pages(): ) def test_update_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4495,7 +4944,7 @@ def test_update_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -4515,7 +4964,7 @@ async def test_update_app_profile_async( request_type=bigtable_instance_admin.UpdateAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4549,7 +4998,7 @@ async def test_update_app_profile_async_from_dict(): def test_update_app_profile_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4581,7 +5030,7 @@ def test_update_app_profile_field_headers(): @pytest.mark.asyncio async def test_update_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4614,7 +5063,7 @@ async def test_update_app_profile_field_headers_async(): def test_update_app_profile_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4644,7 +5093,7 @@ def test_update_app_profile_flattened(): def test_update_app_profile_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4660,7 +5109,7 @@ def test_update_app_profile_flattened_error(): @pytest.mark.asyncio async def test_update_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4695,7 +5144,7 @@ async def test_update_app_profile_flattened_async(): @pytest.mark.asyncio async def test_update_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4717,7 +5166,7 @@ async def test_update_app_profile_flattened_error_async(): ) def test_delete_app_profile(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4746,7 +5195,7 @@ def test_delete_app_profile_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -4766,7 +5215,7 @@ async def test_delete_app_profile_async( request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4798,7 +5247,7 @@ async def test_delete_app_profile_async_from_dict(): def test_delete_app_profile_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4830,7 +5279,7 @@ def test_delete_app_profile_field_headers(): @pytest.mark.asyncio async def test_delete_app_profile_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4861,7 +5310,7 @@ async def test_delete_app_profile_field_headers_async(): def test_delete_app_profile_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4887,7 +5336,7 @@ def test_delete_app_profile_flattened(): def test_delete_app_profile_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4902,7 +5351,7 @@ def test_delete_app_profile_flattened_error(): @pytest.mark.asyncio async def test_delete_app_profile_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4931,7 +5380,7 @@ async def test_delete_app_profile_flattened_async(): @pytest.mark.asyncio async def test_delete_app_profile_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4952,7 +5401,7 @@ async def test_delete_app_profile_flattened_error_async(): ) def test_get_iam_policy(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4984,7 +5433,7 @@ def test_get_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. 
request == None and no flattened fields passed, work. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -5001,7 +5450,7 @@ async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5038,7 +5487,7 @@ async def test_get_iam_policy_async_from_dict(): def test_get_iam_policy_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5068,7 +5517,7 @@ def test_get_iam_policy_field_headers(): @pytest.mark.asyncio async def test_get_iam_policy_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5097,7 +5546,7 @@ async def test_get_iam_policy_field_headers_async(): def test_get_iam_policy_from_dict_foreign(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: @@ -5114,7 +5563,7 @@ def test_get_iam_policy_from_dict_foreign(): def test_get_iam_policy_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5138,7 +5587,7 @@ def test_get_iam_policy_flattened(): def test_get_iam_policy_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5153,7 +5602,7 @@ def test_get_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5180,7 +5629,7 @@ async def test_get_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5201,7 +5650,7 @@ async def test_get_iam_policy_flattened_error_async(): ) def test_set_iam_policy(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5233,7 +5682,7 @@ def test_set_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -5250,7 +5699,7 @@ async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5287,7 +5736,7 @@ async def test_set_iam_policy_async_from_dict(): def test_set_iam_policy_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5317,7 +5766,7 @@ def test_set_iam_policy_field_headers(): @pytest.mark.asyncio async def test_set_iam_policy_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5346,7 +5795,7 @@ async def test_set_iam_policy_field_headers_async(): def test_set_iam_policy_from_dict_foreign(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: @@ -5364,7 +5813,7 @@ def test_set_iam_policy_from_dict_foreign(): def test_set_iam_policy_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5388,7 +5837,7 @@ def test_set_iam_policy_flattened(): def test_set_iam_policy_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5403,7 +5852,7 @@ def test_set_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5430,7 +5879,7 @@ async def test_set_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5451,7 +5900,7 @@ async def test_set_iam_policy_flattened_error_async(): ) def test_test_iam_permissions(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5483,7 +5932,7 @@ def test_test_iam_permissions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -5503,7 +5952,7 @@ async def test_test_iam_permissions_async( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5540,7 +5989,7 @@ async def test_test_iam_permissions_async_from_dict(): def test_test_iam_permissions_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5572,7 +6021,7 @@ def test_test_iam_permissions_field_headers(): @pytest.mark.asyncio async def test_test_iam_permissions_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5605,7 +6054,7 @@ async def test_test_iam_permissions_field_headers_async(): def test_test_iam_permissions_from_dict_foreign(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -5624,7 +6073,7 @@ def test_test_iam_permissions_from_dict_foreign(): def test_test_iam_permissions_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5654,7 +6103,7 @@ def test_test_iam_permissions_flattened(): def test_test_iam_permissions_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5670,7 +6119,7 @@ def test_test_iam_permissions_flattened_error(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5705,7 +6154,7 @@ async def test_test_iam_permissions_flattened_async(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5727,7 +6176,7 @@ async def test_test_iam_permissions_flattened_error_async(): ) def test_list_hot_tablets(request_type, transport: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5757,7 +6206,7 @@ def test_list_hot_tablets_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -5775,7 +6224,7 @@ async def test_list_hot_tablets_async( request_type=bigtable_instance_admin.ListHotTabletsRequest, ): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5810,7 +6259,7 @@ async def test_list_hot_tablets_async_from_dict(): def test_list_hot_tablets_field_headers(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5840,7 +6289,7 @@ def test_list_hot_tablets_field_headers(): @pytest.mark.asyncio async def test_list_hot_tablets_field_headers_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5871,7 +6320,7 @@ async def test_list_hot_tablets_field_headers_async(): def test_list_hot_tablets_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5895,7 +6344,7 @@ def test_list_hot_tablets_flattened(): def test_list_hot_tablets_flattened_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5910,7 +6359,7 @@ def test_list_hot_tablets_flattened_error(): @pytest.mark.asyncio async def test_list_hot_tablets_flattened_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5939,7 +6388,7 @@ async def test_list_hot_tablets_flattened_async(): @pytest.mark.asyncio async def test_list_hot_tablets_flattened_error_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5953,7 +6402,7 @@ async def test_list_hot_tablets_flattened_error_async(): def test_list_hot_tablets_pager(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -6003,7 +6452,7 @@ def test_list_hot_tablets_pager(transport_name: str = "grpc"): def test_list_hot_tablets_pages(transport_name: str = "grpc"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -6045,7 +6494,7 @@ def test_list_hot_tablets_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_hot_tablets_async_pager(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + 
credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6095,7 +6544,7 @@ async def test_list_hot_tablets_async_pager(): @pytest.mark.asyncio async def test_list_hot_tablets_async_pages(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6150,7 +6599,7 @@ async def test_list_hot_tablets_async_pages(): ) def test_create_instance_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -6197,7 +6646,7 @@ def test_create_instance_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -6207,7 +6656,7 @@ def test_create_instance_rest_required_fields( jsonified_request["instanceId"] = "instance_id_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -6218,7 +6667,7 @@ def test_create_instance_rest_required_fields( assert jsonified_request["instanceId"] == "instance_id_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -6258,7 +6707,7 @@ def test_create_instance_rest_required_fields( def test_create_instance_rest_unset_required_fields(): transport = 
transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.create_instance._get_unset_required_fields({}) @@ -6278,7 +6727,7 @@ def test_create_instance_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_create_instance_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -6338,7 +6787,7 @@ def test_create_instance_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.CreateInstanceRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6360,7 +6809,7 @@ def test_create_instance_rest_bad_request( def test_create_instance_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -6401,7 +6850,7 @@ def test_create_instance_rest_flattened(): def test_create_instance_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6419,7 +6868,7 @@ def test_create_instance_rest_flattened_error(transport: str = "rest"): def test_create_instance_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -6432,7 +6881,7 @@ def test_create_instance_rest_error(): ) def 
test_get_instance_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -6491,7 +6940,7 @@ def test_get_instance_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -6500,7 +6949,7 @@ def test_get_instance_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -6509,7 +6958,7 @@ def test_get_instance_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -6551,7 +7000,7 @@ def test_get_instance_rest_required_fields( def test_get_instance_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.get_instance._get_unset_required_fields({}) @@ -6561,7 +7010,7 @@ def test_get_instance_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_get_instance_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else 
transports.BigtableInstanceAdminRestInterceptor(), @@ -6617,7 +7066,7 @@ def test_get_instance_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.GetInstanceRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6639,7 +7088,7 @@ def test_get_instance_rest_bad_request( def test_get_instance_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -6679,7 +7128,7 @@ def test_get_instance_rest_flattened(): def test_get_instance_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6694,7 +7143,7 @@ def test_get_instance_rest_flattened_error(transport: str = "rest"): def test_get_instance_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -6707,7 +7156,7 @@ def test_get_instance_rest_error(): ) def test_list_instances_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -6762,7 +7211,7 @@ def test_list_instances_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_instances._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -6771,7 +7220,7 @@ def test_list_instances_rest_required_fields( jsonified_request["parent"] = "parent_value" 
unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_instances._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. assert not set(unset_fields) - set(("page_token",)) @@ -6782,7 +7231,7 @@ def test_list_instances_rest_required_fields( assert jsonified_request["parent"] == "parent_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -6826,7 +7275,7 @@ def test_list_instances_rest_required_fields( def test_list_instances_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.list_instances._get_unset_required_fields({}) @@ -6836,7 +7285,7 @@ def test_list_instances_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_list_instances_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -6896,7 +7345,7 @@ def test_list_instances_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.ListInstancesRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6918,7 +7367,7 @@ def test_list_instances_rest_bad_request( def test_list_instances_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -6958,7 +7407,7 @@ def test_list_instances_rest_flattened(): def test_list_instances_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6973,7 +7422,7 @@ def test_list_instances_rest_flattened_error(transport: str = "rest"): def test_list_instances_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -6986,7 +7435,7 @@ def test_list_instances_rest_error(): ) def test_update_instance_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7043,7 +7492,7 @@ def test_update_instance_rest_required_fields(request_type=instance.Instance): # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).update_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7052,7 +7501,7 @@ def test_update_instance_rest_required_fields(request_type=instance.Instance): jsonified_request["displayName"] = "display_name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).update_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7061,7 +7510,7 @@ def test_update_instance_rest_required_fields(request_type=instance.Instance): assert jsonified_request["displayName"] == "display_name_value" client = BigtableInstanceAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -7104,7 +7553,7 @@ def test_update_instance_rest_required_fields(request_type=instance.Instance): def test_update_instance_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.update_instance._get_unset_required_fields({}) @@ -7114,7 +7563,7 @@ def test_update_instance_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_update_instance_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -7168,7 +7617,7 @@ def test_update_instance_rest_bad_request( transport: str = "rest", request_type=instance.Instance ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7190,7 +7639,7 @@ def test_update_instance_rest_bad_request( def test_update_instance_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -7203,7 +7652,7 @@ def test_update_instance_rest_error(): ) def test_partial_update_instance_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7326,14 +7775,14 @@ def test_partial_update_instance_rest_required_fields( # verify fields with default 
values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).partial_update_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).partial_update_instance._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. assert not set(unset_fields) - set(("update_mask",)) @@ -7342,7 +7791,7 @@ def test_partial_update_instance_rest_required_fields( # verify required fields with non-default values are left alone client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -7382,7 +7831,7 @@ def test_partial_update_instance_rest_required_fields( def test_partial_update_instance_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.partial_update_instance._get_unset_required_fields({}) @@ -7400,7 +7849,7 @@ def test_partial_update_instance_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_partial_update_instance_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -7461,7 +7910,7 @@ def test_partial_update_instance_rest_bad_request( 
request_type=bigtable_instance_admin.PartialUpdateInstanceRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7483,7 +7932,7 @@ def test_partial_update_instance_rest_bad_request( def test_partial_update_instance_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7523,7 +7972,7 @@ def test_partial_update_instance_rest_flattened(): def test_partial_update_instance_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7539,7 +7988,7 @@ def test_partial_update_instance_rest_flattened_error(transport: str = "rest"): def test_partial_update_instance_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -7552,7 +8001,7 @@ def test_partial_update_instance_rest_error(): ) def test_delete_instance_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7598,7 +8047,7 @@ def test_delete_instance_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7607,7 +8056,7 @@ def test_delete_instance_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - 
credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_instance._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7616,7 +8065,7 @@ def test_delete_instance_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -7655,7 +8104,7 @@ def test_delete_instance_rest_required_fields( def test_delete_instance_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.delete_instance._get_unset_required_fields({}) @@ -7665,7 +8114,7 @@ def test_delete_instance_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_delete_instance_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -7715,7 +8164,7 @@ def test_delete_instance_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.DeleteInstanceRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7737,7 +8186,7 @@ def test_delete_instance_rest_bad_request( def test_delete_instance_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7775,7 +8224,7 @@ def 
test_delete_instance_rest_flattened(): def test_delete_instance_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7790,7 +8239,7 @@ def test_delete_instance_rest_flattened_error(transport: str = "rest"): def test_delete_instance_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -7803,7 +8252,7 @@ def test_delete_instance_rest_error(): ) def test_create_cluster_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7938,7 +8387,7 @@ def test_create_cluster_rest_required_fields( assert "clusterId" not in jsonified_request unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_cluster._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7950,7 +8399,7 @@ def test_create_cluster_rest_required_fields( jsonified_request["clusterId"] = "cluster_id_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_cluster._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set(("cluster_id",)) @@ -7963,7 +8412,7 @@ def test_create_cluster_rest_required_fields( assert jsonified_request["clusterId"] == "cluster_id_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -8009,7 +8458,7 @@ def test_create_cluster_rest_required_fields( def test_create_cluster_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.create_cluster._get_unset_required_fields({}) @@ -8028,7 +8477,7 @@ def test_create_cluster_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_create_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -8088,7 +8537,7 @@ def test_create_cluster_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.CreateClusterRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8110,7 +8559,7 @@ def test_create_cluster_rest_bad_request( def test_create_cluster_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8151,7 +8600,7 @@ def test_create_cluster_rest_flattened(): def test_create_cluster_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8168,7 +8617,7 @@ def test_create_cluster_rest_flattened_error(transport: str = "rest"): def test_create_cluster_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -8181,7 +8630,7 @@ def test_create_cluster_rest_error(): ) def test_get_cluster_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8240,7 +8689,7 @@ def test_get_cluster_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_cluster._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -8249,7 +8698,7 @@ def test_get_cluster_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_cluster._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -8258,7 +8707,7 @@ def test_get_cluster_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -8300,7 +8749,7 @@ def test_get_cluster_rest_required_fields( def test_get_cluster_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + 
credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.get_cluster._get_unset_required_fields({}) @@ -8310,7 +8759,7 @@ def test_get_cluster_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_get_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -8366,7 +8815,7 @@ def test_get_cluster_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.GetClusterRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8388,7 +8837,7 @@ def test_get_cluster_rest_bad_request( def test_get_cluster_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8429,7 +8878,7 @@ def test_get_cluster_rest_flattened(): def test_get_cluster_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8444,7 +8893,7 @@ def test_get_cluster_rest_flattened_error(transport: str = "rest"): def test_get_cluster_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -8457,7 +8906,7 @@ def test_get_cluster_rest_error(): ) def test_list_clusters_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8512,7 +8961,7 @@ def test_list_clusters_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_clusters._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -8521,7 +8970,7 @@ def test_list_clusters_rest_required_fields( jsonified_request["parent"] = "parent_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_clusters._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. assert not set(unset_fields) - set(("page_token",)) @@ -8532,7 +8981,7 @@ def test_list_clusters_rest_required_fields( assert jsonified_request["parent"] == "parent_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -8574,7 +9023,7 @@ def test_list_clusters_rest_required_fields( def test_list_clusters_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.list_clusters._get_unset_required_fields({}) @@ -8584,7 +9033,7 @@ def test_list_clusters_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_list_clusters_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else 
transports.BigtableInstanceAdminRestInterceptor(), @@ -8644,7 +9093,7 @@ def test_list_clusters_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.ListClustersRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8666,7 +9115,7 @@ def test_list_clusters_rest_bad_request( def test_list_clusters_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8707,7 +9156,7 @@ def test_list_clusters_rest_flattened(): def test_list_clusters_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8722,7 +9171,7 @@ def test_list_clusters_rest_flattened_error(transport: str = "rest"): def test_list_clusters_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -8735,7 +9184,7 @@ def test_list_clusters_rest_error(): ) def test_update_cluster_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8764,7 +9213,7 @@ def test_update_cluster_rest(request_type): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_update_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -8822,7 +9271,7 
@@ def test_update_cluster_rest_bad_request( transport: str = "rest", request_type=instance.Cluster ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8844,7 +9293,7 @@ def test_update_cluster_rest_bad_request( def test_update_cluster_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -8857,7 +9306,7 @@ def test_update_cluster_rest_error(): ) def test_partial_update_cluster_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8993,14 +9442,14 @@ def test_partial_update_cluster_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).partial_update_cluster._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).partial_update_cluster._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set(("update_mask",)) @@ -9009,7 +9458,7 @@ def test_partial_update_cluster_rest_required_fields( # verify required fields with non-default values are left alone client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -9049,7 +9498,7 @@ def test_partial_update_cluster_rest_required_fields( def test_partial_update_cluster_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.partial_update_cluster._get_unset_required_fields({}) @@ -9067,7 +9516,7 @@ def test_partial_update_cluster_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_partial_update_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -9128,7 +9577,7 @@ def test_partial_update_cluster_rest_bad_request( request_type=bigtable_instance_admin.PartialUpdateClusterRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9152,7 +9601,7 @@ def test_partial_update_cluster_rest_bad_request( def test_partial_update_cluster_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9195,7 +9644,7 @@ def test_partial_update_cluster_rest_flattened(): def test_partial_update_cluster_rest_flattened_error(transport: str 
= "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9211,7 +9660,7 @@ def test_partial_update_cluster_rest_flattened_error(transport: str = "rest"): def test_partial_update_cluster_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -9224,7 +9673,7 @@ def test_partial_update_cluster_rest_error(): ) def test_delete_cluster_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9270,7 +9719,7 @@ def test_delete_cluster_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_cluster._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9279,7 +9728,7 @@ def test_delete_cluster_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_cluster._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9288,7 +9737,7 @@ def test_delete_cluster_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -9327,7 +9776,7 @@ def test_delete_cluster_rest_required_fields( def test_delete_cluster_rest_unset_required_fields(): transport = 
transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.delete_cluster._get_unset_required_fields({}) @@ -9337,7 +9786,7 @@ def test_delete_cluster_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_delete_cluster_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -9387,7 +9836,7 @@ def test_delete_cluster_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.DeleteClusterRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9409,7 +9858,7 @@ def test_delete_cluster_rest_bad_request( def test_delete_cluster_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9448,7 +9897,7 @@ def test_delete_cluster_rest_flattened(): def test_delete_cluster_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9463,7 +9912,7 @@ def test_delete_cluster_rest_flattened_error(transport: str = "rest"): def test_delete_cluster_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -9476,7 +9925,7 @@ def test_delete_cluster_rest_error(): ) def 
test_create_app_profile_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9617,7 +10066,7 @@ def test_create_app_profile_rest_required_fields( assert "appProfileId" not in jsonified_request unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_app_profile._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9629,7 +10078,7 @@ def test_create_app_profile_rest_required_fields( jsonified_request["appProfileId"] = "app_profile_id_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_app_profile._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. assert not set(unset_fields) - set( @@ -9647,7 +10096,7 @@ def test_create_app_profile_rest_required_fields( assert jsonified_request["appProfileId"] == "app_profile_id_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -9696,7 +10145,7 @@ def test_create_app_profile_rest_required_fields( def test_create_app_profile_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.create_app_profile._get_unset_required_fields({}) @@ -9720,7 +10169,7 @@ def test_create_app_profile_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_create_app_profile_rest_interceptors(null_interceptor): transport = 
transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -9777,7 +10226,7 @@ def test_create_app_profile_rest_bad_request( request_type=bigtable_instance_admin.CreateAppProfileRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9799,7 +10248,7 @@ def test_create_app_profile_rest_bad_request( def test_create_app_profile_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9843,7 +10292,7 @@ def test_create_app_profile_rest_flattened(): def test_create_app_profile_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9860,7 +10309,7 @@ def test_create_app_profile_rest_flattened_error(transport: str = "rest"): def test_create_app_profile_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -9873,7 +10322,7 @@ def test_create_app_profile_rest_error(): ) def test_get_app_profile_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9929,7 +10378,7 @@ def test_get_app_profile_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + 
credentials=_AnonymousCredentialsWithUniverseDomain() ).get_app_profile._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9938,7 +10387,7 @@ def test_get_app_profile_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_app_profile._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9947,7 +10396,7 @@ def test_get_app_profile_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -9989,7 +10438,7 @@ def test_get_app_profile_rest_required_fields( def test_get_app_profile_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.get_app_profile._get_unset_required_fields({}) @@ -9999,7 +10448,7 @@ def test_get_app_profile_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_get_app_profile_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -10055,7 +10504,7 @@ def test_get_app_profile_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.GetAppProfileRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10077,7 
+10526,7 @@ def test_get_app_profile_rest_bad_request( def test_get_app_profile_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10121,7 +10570,7 @@ def test_get_app_profile_rest_flattened(): def test_get_app_profile_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10136,7 +10585,7 @@ def test_get_app_profile_rest_flattened_error(transport: str = "rest"): def test_get_app_profile_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -10149,7 +10598,7 @@ def test_get_app_profile_rest_error(): ) def test_list_app_profiles_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10202,7 +10651,7 @@ def test_list_app_profiles_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_app_profiles._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -10211,7 +10660,7 @@ def test_list_app_profiles_rest_required_fields( jsonified_request["parent"] = "parent_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_app_profiles._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set( @@ -10227,7 +10676,7 @@ def test_list_app_profiles_rest_required_fields( assert jsonified_request["parent"] == "parent_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -10271,7 +10720,7 @@ def test_list_app_profiles_rest_required_fields( def test_list_app_profiles_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.list_app_profiles._get_unset_required_fields({}) @@ -10289,7 +10738,7 @@ def test_list_app_profiles_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_list_app_profiles_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -10349,7 +10798,7 @@ def test_list_app_profiles_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.ListAppProfilesRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10371,7 +10820,7 @@ def test_list_app_profiles_rest_bad_request( def test_list_app_profiles_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10413,7 +10862,7 @@ def test_list_app_profiles_rest_flattened(): def test_list_app_profiles_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10428,7 +10877,7 @@ def test_list_app_profiles_rest_flattened_error(transport: str = "rest"): def test_list_app_profiles_rest_pager(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10498,7 +10947,7 @@ def test_list_app_profiles_rest_pager(transport: str = "rest"): ) def test_update_app_profile_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10630,14 +11079,14 @@ def test_update_app_profile_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).update_app_profile._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).update_app_profile._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set( @@ -10651,7 +11100,7 @@ def test_update_app_profile_rest_required_fields( # verify required fields with non-default values are left alone client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -10691,7 +11140,7 @@ def test_update_app_profile_rest_required_fields( def test_update_app_profile_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.update_app_profile._get_unset_required_fields({}) @@ -10714,7 +11163,7 @@ def test_update_app_profile_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_update_app_profile_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -10775,7 +11224,7 @@ def test_update_app_profile_rest_bad_request( request_type=bigtable_instance_admin.UpdateAppProfileRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10801,7 +11250,7 @@ def test_update_app_profile_rest_bad_request( def test_update_app_profile_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10846,7 +11295,7 @@ def test_update_app_profile_rest_flattened(): def test_update_app_profile_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10862,7 +11311,7 @@ def test_update_app_profile_rest_flattened_error(transport: str = "rest"): def test_update_app_profile_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -10875,7 +11324,7 @@ def test_update_app_profile_rest_error(): ) def test_delete_app_profile_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10923,7 +11372,7 @@ def test_delete_app_profile_rest_required_fields( assert "ignoreWarnings" not in jsonified_request unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_app_profile._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -10935,7 +11384,7 @@ def test_delete_app_profile_rest_required_fields( jsonified_request["ignoreWarnings"] = True unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_app_profile._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set(("ignore_warnings",)) @@ -10948,7 +11397,7 @@ def test_delete_app_profile_rest_required_fields( assert jsonified_request["ignoreWarnings"] == True client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -10993,7 +11442,7 @@ def test_delete_app_profile_rest_required_fields( def test_delete_app_profile_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.delete_app_profile._get_unset_required_fields({}) @@ -11011,7 +11460,7 @@ def test_delete_app_profile_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_delete_app_profile_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -11062,7 +11511,7 @@ def test_delete_app_profile_rest_bad_request( request_type=bigtable_instance_admin.DeleteAppProfileRequest, ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11084,7 +11533,7 @@ def test_delete_app_profile_rest_bad_request( def test_delete_app_profile_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11126,7 +11575,7 @@ def test_delete_app_profile_rest_flattened(): def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): client = 
BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11141,7 +11590,7 @@ def test_delete_app_profile_rest_flattened_error(transport: str = "rest"): def test_delete_app_profile_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -11154,7 +11603,7 @@ def test_delete_app_profile_rest_error(): ) def test_get_iam_policy_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11205,7 +11654,7 @@ def test_get_iam_policy_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11214,7 +11663,7 @@ def test_get_iam_policy_rest_required_fields( jsonified_request["resource"] = "resource_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11223,7 +11672,7 @@ def test_get_iam_policy_rest_required_fields( assert jsonified_request["resource"] == "resource_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -11264,7 +11713,7 @@ def test_get_iam_policy_rest_required_fields( def test_get_iam_policy_rest_unset_required_fields(): transport = 
transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.get_iam_policy._get_unset_required_fields({}) @@ -11274,7 +11723,7 @@ def test_get_iam_policy_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_get_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -11328,7 +11777,7 @@ def test_get_iam_policy_rest_bad_request( transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11350,7 +11799,7 @@ def test_get_iam_policy_rest_bad_request( def test_get_iam_policy_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11390,7 +11839,7 @@ def test_get_iam_policy_rest_flattened(): def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11405,7 +11854,7 @@ def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): def test_get_iam_policy_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -11418,7 +11867,7 @@ def test_get_iam_policy_rest_error(): ) def 
test_set_iam_policy_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11469,7 +11918,7 @@ def test_set_iam_policy_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).set_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11478,7 +11927,7 @@ def test_set_iam_policy_rest_required_fields( jsonified_request["resource"] = "resource_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).set_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11487,7 +11936,7 @@ def test_set_iam_policy_rest_required_fields( assert jsonified_request["resource"] == "resource_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -11528,7 +11977,7 @@ def test_set_iam_policy_rest_required_fields( def test_set_iam_policy_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.set_iam_policy._get_unset_required_fields({}) @@ -11546,7 +11995,7 @@ def test_set_iam_policy_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_set_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), 
interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -11600,7 +12049,7 @@ def test_set_iam_policy_rest_bad_request( transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11622,7 +12071,7 @@ def test_set_iam_policy_rest_bad_request( def test_set_iam_policy_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11662,7 +12111,7 @@ def test_set_iam_policy_rest_flattened(): def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11677,7 +12126,7 @@ def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): def test_set_iam_policy_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -11690,7 +12139,7 @@ def test_set_iam_policy_rest_error(): ) def test_test_iam_permissions_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11740,7 +12189,7 @@ def test_test_iam_permissions_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).test_iam_permissions._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11750,7 +12199,7 @@ def 
test_test_iam_permissions_rest_required_fields( jsonified_request["permissions"] = "permissions_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).test_iam_permissions._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11761,7 +12210,7 @@ def test_test_iam_permissions_rest_required_fields( assert jsonified_request["permissions"] == "permissions_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -11802,7 +12251,7 @@ def test_test_iam_permissions_rest_required_fields( def test_test_iam_permissions_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) @@ -11820,7 +12269,7 @@ def test_test_iam_permissions_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_test_iam_permissions_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -11876,7 +12325,7 @@ def test_test_iam_permissions_rest_bad_request( transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11898,7 +12347,7 @@ def test_test_iam_permissions_rest_bad_request( def test_test_iam_permissions_rest_flattened(): client 
= BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11939,7 +12388,7 @@ def test_test_iam_permissions_rest_flattened(): def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11955,7 +12404,7 @@ def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): def test_test_iam_permissions_rest_error(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -11968,7 +12417,7 @@ def test_test_iam_permissions_rest_error(): ) def test_list_hot_tablets_rest(request_type): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -12019,7 +12468,7 @@ def test_list_hot_tablets_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_hot_tablets._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -12028,7 +12477,7 @@ def test_list_hot_tablets_rest_required_fields( jsonified_request["parent"] = "parent_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_hot_tablets._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set( @@ -12046,7 +12495,7 @@ def test_list_hot_tablets_rest_required_fields( assert jsonified_request["parent"] == "parent_value" client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -12090,7 +12539,7 @@ def test_list_hot_tablets_rest_required_fields( def test_list_hot_tablets_rest_unset_required_fields(): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.list_hot_tablets._get_unset_required_fields({}) @@ -12110,7 +12559,7 @@ def test_list_hot_tablets_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_list_hot_tablets_rest_interceptors(null_interceptor): transport = transports.BigtableInstanceAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableInstanceAdminRestInterceptor(), @@ -12170,7 +12619,7 @@ def test_list_hot_tablets_rest_bad_request( transport: str = "rest", request_type=bigtable_instance_admin.ListHotTabletsRequest ): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -12192,7 +12641,7 @@ def test_list_hot_tablets_rest_bad_request( def test_list_hot_tablets_rest_flattened(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -12236,7 +12685,7 @@ def test_list_hot_tablets_rest_flattened(): def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): client = BigtableInstanceAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -12251,7 +12700,7 @@ def test_list_hot_tablets_rest_flattened_error(transport: str = "rest"): def test_list_hot_tablets_rest_pager(transport: str = "rest"): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -12317,17 +12766,17 @@ def test_list_hot_tablets_rest_pager(transport: str = "rest"): def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) with pytest.raises(ValueError): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) with pytest.raises(ValueError): client = BigtableInstanceAdminClient( @@ -12337,7 +12786,7 @@ def test_credentials_transport_error(): # It is an error to provide an api_key and a transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) options = client_options.ClientOptions() options.api_key = "api_key" @@ -12348,16 +12797,17 @@ def test_credentials_transport_error(): ) # It is an error to provide an api_key and a credential. 
- options = mock.Mock() + options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = BigtableInstanceAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # It is an error to provide scopes and a transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) with pytest.raises(ValueError): client = BigtableInstanceAdminClient( @@ -12369,7 +12819,7 @@ def test_credentials_transport_error(): def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) client = BigtableInstanceAdminClient(transport=transport) assert client.transport is transport @@ -12378,13 +12828,13 @@ def test_transport_instance(): def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.BigtableInstanceAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) channel = transport.grpc_channel assert channel transport = transports.BigtableInstanceAdminGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) channel = transport.grpc_channel assert channel @@ -12401,7 +12851,7 @@ def test_transport_get_channel(): def test_transport_adc(transport_class): # Test default credentials are used if not provided. 
with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport_class() adc.assert_called_once() @@ -12415,7 +12865,7 @@ def test_transport_adc(transport_class): ) def test_transport_kind(transport_name): transport = BigtableInstanceAdminClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) assert transport.kind == transport_name @@ -12423,7 +12873,7 @@ def test_transport_kind(transport_name): def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) assert isinstance( client.transport, @@ -12435,7 +12885,7 @@ def test_bigtable_instance_admin_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.BigtableInstanceAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), credentials_file="credentials.json", ) @@ -12447,7 +12897,7 @@ def test_bigtable_instance_admin_base_transport(): ) as Transport: Transport.return_value = None transport = transports.BigtableInstanceAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Every method on the transport should just blindly @@ -12504,7 +12954,7 @@ def test_bigtable_instance_admin_base_transport_with_credentials_file(): "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - load_creds.return_value = 
(ga_credentials.AnonymousCredentials(), None) + load_creds.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport = transports.BigtableInstanceAdminTransport( credentials_file="credentials.json", quota_project_id="octopus", @@ -12531,7 +12981,7 @@ def test_bigtable_instance_admin_base_transport_with_adc(): "google.cloud.bigtable_admin_v2.services.bigtable_instance_admin.transports.BigtableInstanceAdminTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport = transports.BigtableInstanceAdminTransport() adc.assert_called_once() @@ -12539,7 +12989,7 @@ def test_bigtable_instance_admin_base_transport_with_adc(): def test_bigtable_instance_admin_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) BigtableInstanceAdminClient() adc.assert_called_once_with( scopes=None, @@ -12567,7 +13017,7 @@ def test_bigtable_instance_admin_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. 
with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], @@ -12624,7 +13074,7 @@ def test_bigtable_instance_admin_transport_create_channel( ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: - creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) @@ -12662,7 +13112,7 @@ def test_bigtable_instance_admin_transport_create_channel( def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( transport_class, ): - cred = ga_credentials.AnonymousCredentials() + cred = _AnonymousCredentialsWithUniverseDomain() # Check ssl_channel_credentials is used if provided. 
with mock.patch.object(transport_class, "create_channel") as mock_create_channel: @@ -12700,7 +13150,7 @@ def test_bigtable_instance_admin_grpc_transport_client_cert_source_for_mtls( def test_bigtable_instance_admin_http_transport_client_cert_source_for_mtls(): - cred = ga_credentials.AnonymousCredentials() + cred = _AnonymousCredentialsWithUniverseDomain() with mock.patch( "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" ) as mock_configure_mtls_channel: @@ -12712,7 +13162,7 @@ def test_bigtable_instance_admin_http_transport_client_cert_source_for_mtls(): def test_bigtable_instance_admin_rest_lro_client(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) transport = client.transport @@ -12737,7 +13187,7 @@ def test_bigtable_instance_admin_rest_lro_client(): ) def test_bigtable_instance_admin_host_no_port(transport_name): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com" ), @@ -12760,7 +13210,7 @@ def test_bigtable_instance_admin_host_no_port(transport_name): ) def test_bigtable_instance_admin_host_with_port(transport_name): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com:8000" ), @@ -12780,8 +13230,8 @@ def test_bigtable_instance_admin_host_with_port(transport_name): ], ) def test_bigtable_instance_admin_client_transport_session_collision(transport_name): - creds1 = ga_credentials.AnonymousCredentials() - creds2 = ga_credentials.AnonymousCredentials() + creds1 = _AnonymousCredentialsWithUniverseDomain() + creds2 = _AnonymousCredentialsWithUniverseDomain() 
client1 = BigtableInstanceAdminClient( credentials=creds1, transport=transport_name, @@ -12905,7 +13355,7 @@ def test_bigtable_instance_admin_transport_channel_mtls_with_client_cert_source( mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel - cred = ga_credentials.AnonymousCredentials() + cred = _AnonymousCredentialsWithUniverseDomain() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) @@ -12983,7 +13433,7 @@ def test_bigtable_instance_admin_transport_channel_mtls_with_adc(transport_class def test_bigtable_instance_admin_grpc_lro_client(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) transport = client.transport @@ -13000,7 +13450,7 @@ def test_bigtable_instance_admin_grpc_lro_client(): def test_bigtable_instance_admin_grpc_lro_async_client(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc_asyncio", ) transport = client.transport @@ -13292,7 +13742,7 @@ def test_client_with_default_client_info(): transports.BigtableInstanceAdminTransport, "_prep_wrapped_messages" ) as prep: client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -13302,7 +13752,7 @@ def test_client_with_default_client_info(): ) as prep: transport_class = BigtableInstanceAdminClient.get_transport_class() transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -13311,7 +13761,7 @@ def test_client_with_default_client_info(): 
@pytest.mark.asyncio async def test_transport_close_async(): client = BigtableInstanceAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc_asyncio", ) with mock.patch.object( @@ -13330,7 +13780,7 @@ def test_transport_close(): for transport, close_name in transports.items(): client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" @@ -13347,7 +13797,7 @@ def test_client_ctx(): ] for transport in transports: client = BigtableInstanceAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport ) # Test client calls underlying transport. with mock.patch.object(type(client.transport), "close") as close: @@ -13381,7 +13831,9 @@ def test_api_key_credentials(client_class, transport_class): patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, diff --git a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py index b29dc5106..f8409fb58 100644 --- a/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py +++ b/tests/unit/gapic/bigtable_admin_v2/test_bigtable_table_admin.py @@ -29,6 +29,7 @@ import json import math import pytest +from google.api_core import api_core_version from proto.marshal.rules.dates import DurationRule, TimestampRule from proto.marshal.rules import wrappers from requests import Response @@ -88,6 +89,29 @@ def modify_default_endpoint(client): ) +# If 
default endpoint template is localhost, then default mtls endpoint will be the same. +# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +# Anonymous Credentials with universe domain property. If no universe domain is provided, then +# the default universe domain is "googleapis.com". +class _AnonymousCredentialsWithUniverseDomain(ga_credentials.AnonymousCredentials): + def __init__(self, universe_domain="googleapis.com"): + super(_AnonymousCredentialsWithUniverseDomain, self).__init__() + self._universe_domain = universe_domain + + @property + def universe_domain(self): + return self._universe_domain + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -118,6 +142,279 @@ def test__get_default_mtls_endpoint(): ) +def test__read_environment_variables(): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + True, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + BigtableTableAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": 
"never"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "never", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "always", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + None, + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableTableAdminClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert BigtableTableAdminClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert BigtableTableAdminClient._get_client_cert_source(None, False) is None + assert ( + BigtableTableAdminClient._get_client_cert_source( + mock_provided_cert_source, False + ) + is None + ) + assert ( + BigtableTableAdminClient._get_client_cert_source( + mock_provided_cert_source, True + ) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + BigtableTableAdminClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + BigtableTableAdminClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + BigtableTableAdminClient, + 
"_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminClient), +) +@mock.patch.object( + BigtableTableAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminAsyncClient), +) +def test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + BigtableTableAdminClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + BigtableTableAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableTableAdminClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + BigtableTableAdminClient._get_api_endpoint( + None, None, default_universe, "always" + ) + == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableTableAdminClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == BigtableTableAdminClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableTableAdminClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + BigtableTableAdminClient._get_api_endpoint( + None, None, default_universe, "never" + ) + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableTableAdminClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." 
+ ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + BigtableTableAdminClient._get_universe_domain( + client_universe_domain, universe_domain_env + ) + == client_universe_domain + ) + assert ( + BigtableTableAdminClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + BigtableTableAdminClient._get_universe_domain(None, None) + == BigtableTableAdminClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + BigtableTableAdminClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableTableAdminClient, transports.BigtableTableAdminGrpcTransport, "grpc"), + (BigtableTableAdminClient, transports.BigtableTableAdminRestTransport, "rest"), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. 
+ channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # Test the case when there is a universe mismatch from the credentials. + client = client_class( + transport=transport_class( + credentials=_AnonymousCredentialsWithUniverseDomain( + universe_domain="foo.com" + ) + ) + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor, _ = [ + int(part) for part in api_core_version.__version__.split(".") + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=_AnonymousCredentialsWithUniverseDomain(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
+ ) + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -129,7 +426,7 @@ def test__get_default_mtls_endpoint(): def test_bigtable_table_admin_client_from_service_account_info( client_class, transport_name ): - creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: @@ -183,7 +480,7 @@ def test_bigtable_table_admin_client_service_account_always_use_jwt( def test_bigtable_table_admin_client_from_service_account_file( client_class, transport_name ): - creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: @@ -233,20 +530,22 @@ def test_bigtable_table_admin_client_get_transport_class(): ) @mock.patch.object( BigtableTableAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminClient), ) @mock.patch.object( BigtableTableAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminAsyncClient), ) def test_bigtable_table_admin_client_client_options( client_class, transport_class, transport_name ): # Check that if channel is provided we won't create a new one. 
with mock.patch.object(BigtableTableAdminClient, "get_transport_class") as gtc: - transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + transport = transport_class( + credentials=_AnonymousCredentialsWithUniverseDomain() + ) client = client_class(transport=transport) gtc.assert_not_called() @@ -281,7 +580,9 @@ def test_bigtable_table_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -311,15 +612,23 @@ def test_bigtable_table_admin_client_client_options( # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): + with pytest.raises(MutualTLSChannelError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): - with pytest.raises(ValueError): + with pytest.raises(ValueError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") @@ -329,7 +638,9 @@ def test_bigtable_table_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", @@ -347,7 +658,9 @@ def test_bigtable_table_admin_client_client_options( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -400,13 +713,13 @@ def test_bigtable_table_admin_client_client_options( ) @mock.patch.object( BigtableTableAdminClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminClient), ) @mock.patch.object( BigtableTableAdminAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableTableAdminAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_bigtable_table_admin_client_mtls_env_auto( @@ -429,7 +742,9 @@ def test_bigtable_table_admin_client_mtls_env_auto( if use_client_cert_env == "false": expected_client_cert_source = None - expected_host = client.DEFAULT_ENDPOINT + 
expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -461,7 +776,9 @@ def test_bigtable_table_admin_client_mtls_env_auto( return_value=client_cert_source_callback, ): if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -495,7 +812,9 @@ def test_bigtable_table_admin_client_mtls_env_auto( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -585,6 +904,118 @@ def test_bigtable_table_admin_client_get_mtls_endpoint_and_cert_source(client_cl assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize( + "client_class", [BigtableTableAdminClient, BigtableTableAdminAsyncClient] +) +@mock.patch.object( + BigtableTableAdminClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminClient), +) +@mock.patch.object( + BigtableTableAdminAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableTableAdminAsyncClient), +) +def test_bigtable_table_admin_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = BigtableTableAdminClient._DEFAULT_UNIVERSE + default_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableTableAdminClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. 
+ options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + else: + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. + options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + assert client.api_endpoint == default_endpoint + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -611,7 +1042,9 @@ def test_bigtable_table_admin_client_client_options_scopes( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, @@ -656,7 +1089,9 @@ def test_bigtable_table_admin_client_client_options_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -716,7 +1151,9 
@@ def test_bigtable_table_admin_client_create_channel_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -733,8 +1170,8 @@ def test_bigtable_table_admin_client_create_channel_credentials_file( ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - file_creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() + file_creds = _AnonymousCredentialsWithUniverseDomain() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = client_class(client_options=options, transport=transport_name) @@ -770,7 +1207,7 @@ def test_bigtable_table_admin_client_create_channel_credentials_file( ) def test_create_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -804,7 +1241,7 @@ def test_create_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -822,7 +1259,7 @@ async def test_create_table_async( request_type=bigtable_table_admin.CreateTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -861,7 +1298,7 @@ async def test_create_table_async_from_dict(): def test_create_table_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -891,7 +1328,7 @@ def test_create_table_field_headers(): @pytest.mark.asyncio async def test_create_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -920,7 +1357,7 @@ async def test_create_table_field_headers_async(): def test_create_table_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -952,7 +1389,7 @@ def test_create_table_flattened(): def test_create_table_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -969,7 +1406,7 @@ def test_create_table_flattened_error(): @pytest.mark.asyncio async def test_create_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1004,7 +1441,7 @@ async def test_create_table_flattened_async(): @pytest.mark.asyncio async def test_create_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1027,7 +1464,7 @@ async def test_create_table_flattened_error_async(): ) def test_create_table_from_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1056,7 +1493,7 @@ def test_create_table_from_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1076,7 +1513,7 @@ async def test_create_table_from_snapshot_async( request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1110,7 +1547,7 @@ async def test_create_table_from_snapshot_async_from_dict(): def test_create_table_from_snapshot_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1142,7 +1579,7 @@ def test_create_table_from_snapshot_field_headers(): @pytest.mark.asyncio async def test_create_table_from_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1175,7 +1612,7 @@ async def test_create_table_from_snapshot_field_headers_async(): def test_create_table_from_snapshot_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1209,7 +1646,7 @@ def test_create_table_from_snapshot_flattened(): def test_create_table_from_snapshot_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1226,7 +1663,7 @@ def test_create_table_from_snapshot_flattened_error(): @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1265,7 +1702,7 @@ async def test_create_table_from_snapshot_flattened_async(): @pytest.mark.asyncio async def test_create_table_from_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1288,7 +1725,7 @@ async def test_create_table_from_snapshot_flattened_error_async(): ) def test_list_tables(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1318,7 +1755,7 @@ def test_list_tables_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1335,7 +1772,7 @@ async def test_list_tables_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.ListTablesRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1370,7 +1807,7 @@ async def test_list_tables_async_from_dict(): def test_list_tables_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1400,7 +1837,7 @@ def test_list_tables_field_headers(): @pytest.mark.asyncio async def test_list_tables_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1431,7 +1868,7 @@ async def test_list_tables_field_headers_async(): def test_list_tables_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1455,7 +1892,7 @@ def test_list_tables_flattened(): def test_list_tables_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1470,7 +1907,7 @@ def test_list_tables_flattened_error(): @pytest.mark.asyncio async def test_list_tables_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1499,7 +1936,7 @@ async def test_list_tables_flattened_async(): @pytest.mark.asyncio async def test_list_tables_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1513,7 +1950,7 @@ async def test_list_tables_flattened_error_async(): def test_list_tables_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -1563,7 +2000,7 @@ def test_list_tables_pager(transport_name: str = "grpc"): def test_list_tables_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -1605,7 +2042,7 @@ def test_list_tables_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_tables_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1655,7 +2092,7 @@ async def test_list_tables_async_pager(): @pytest.mark.asyncio async def test_list_tables_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1710,7 +2147,7 @@ async def test_list_tables_async_pages(): ) def test_get_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1744,7 +2181,7 @@ def test_get_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1761,7 +2198,7 @@ async def test_get_table_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetTableRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1800,7 +2237,7 @@ async def test_get_table_async_from_dict(): def test_get_table_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1830,7 +2267,7 @@ def test_get_table_field_headers(): @pytest.mark.asyncio async def test_get_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1859,7 +2296,7 @@ async def 
test_get_table_field_headers_async(): def test_get_table_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1883,7 +2320,7 @@ def test_get_table_flattened(): def test_get_table_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1898,7 +2335,7 @@ def test_get_table_flattened_error(): @pytest.mark.asyncio async def test_get_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1925,7 +2362,7 @@ async def test_get_table_flattened_async(): @pytest.mark.asyncio async def test_get_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1946,7 +2383,7 @@ async def test_get_table_flattened_error_async(): ) def test_update_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1973,7 +2410,7 @@ def test_update_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1991,7 +2428,7 @@ async def test_update_table_async( request_type=bigtable_table_admin.UpdateTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2023,7 +2460,7 @@ async def test_update_table_async_from_dict(): def test_update_table_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2053,7 +2490,7 @@ def test_update_table_field_headers(): @pytest.mark.asyncio async def test_update_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2084,7 +2521,7 @@ async def test_update_table_field_headers_async(): def test_update_table_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2112,7 +2549,7 @@ def test_update_table_flattened(): def test_update_table_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2128,7 +2565,7 @@ def test_update_table_flattened_error(): @pytest.mark.asyncio async def test_update_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2161,7 +2598,7 @@ async def test_update_table_flattened_async(): @pytest.mark.asyncio async def test_update_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2183,7 +2620,7 @@ async def test_update_table_flattened_error_async(): ) def test_delete_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2210,7 +2647,7 @@ def test_delete_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2228,7 +2665,7 @@ async def test_delete_table_async( request_type=bigtable_table_admin.DeleteTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2258,7 +2695,7 @@ async def test_delete_table_async_from_dict(): def test_delete_table_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2288,7 +2725,7 @@ def test_delete_table_field_headers(): @pytest.mark.asyncio async def test_delete_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2317,7 +2754,7 @@ async def test_delete_table_field_headers_async(): def test_delete_table_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2341,7 +2778,7 @@ def test_delete_table_flattened(): def test_delete_table_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2356,7 +2793,7 @@ def test_delete_table_flattened_error(): @pytest.mark.asyncio async def test_delete_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2383,7 +2820,7 @@ async def test_delete_table_flattened_async(): @pytest.mark.asyncio async def test_delete_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2404,7 +2841,7 @@ async def test_delete_table_flattened_error_async(): ) def test_undelete_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2431,7 +2868,7 @@ def test_undelete_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2449,7 +2886,7 @@ async def test_undelete_table_async( request_type=bigtable_table_admin.UndeleteTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2481,7 +2918,7 @@ async def test_undelete_table_async_from_dict(): def test_undelete_table_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2511,7 +2948,7 @@ def test_undelete_table_field_headers(): @pytest.mark.asyncio async def test_undelete_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2542,7 +2979,7 @@ async def test_undelete_table_field_headers_async(): def test_undelete_table_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2566,7 +3003,7 @@ def test_undelete_table_flattened(): def test_undelete_table_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2581,7 +3018,7 @@ def test_undelete_table_flattened_error(): @pytest.mark.asyncio async def test_undelete_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2610,7 +3047,7 @@ async def test_undelete_table_flattened_async(): @pytest.mark.asyncio async def test_undelete_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2631,7 +3068,7 @@ async def test_undelete_table_flattened_error_async(): ) def test_modify_column_families(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2667,7 +3104,7 @@ def test_modify_column_families_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2687,7 +3124,7 @@ async def test_modify_column_families_async( request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2728,7 +3165,7 @@ async def test_modify_column_families_async_from_dict(): def test_modify_column_families_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2760,7 +3197,7 @@ def test_modify_column_families_field_headers(): @pytest.mark.asyncio async def test_modify_column_families_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2791,7 +3228,7 @@ async def test_modify_column_families_field_headers_async(): def test_modify_column_families_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2827,7 +3264,7 @@ def test_modify_column_families_flattened(): def test_modify_column_families_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2847,7 +3284,7 @@ def test_modify_column_families_flattened_error(): @pytest.mark.asyncio async def test_modify_column_families_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2886,7 +3323,7 @@ async def test_modify_column_families_flattened_async(): @pytest.mark.asyncio async def test_modify_column_families_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2912,7 +3349,7 @@ async def test_modify_column_families_flattened_error_async(): ) def test_drop_row_range(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2939,7 +3376,7 @@ def test_drop_row_range_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2957,7 +3394,7 @@ async def test_drop_row_range_async( request_type=bigtable_table_admin.DropRowRangeRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2987,7 +3424,7 @@ async def test_drop_row_range_async_from_dict(): def test_drop_row_range_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3017,7 +3454,7 @@ def test_drop_row_range_field_headers(): @pytest.mark.asyncio async def test_drop_row_range_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3053,7 +3490,7 @@ async def test_drop_row_range_field_headers_async(): ) def test_generate_consistency_token(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3085,7 +3522,7 @@ def test_generate_consistency_token_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -3105,7 +3542,7 @@ async def test_generate_consistency_token_async( request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3142,7 +3579,7 @@ async def test_generate_consistency_token_async_from_dict(): def test_generate_consistency_token_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3174,7 +3611,7 @@ def test_generate_consistency_token_field_headers(): @pytest.mark.asyncio async def test_generate_consistency_token_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3207,7 +3644,7 @@ async def test_generate_consistency_token_field_headers_async(): def test_generate_consistency_token_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3233,7 +3670,7 @@ def test_generate_consistency_token_flattened(): def test_generate_consistency_token_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3248,7 +3685,7 @@ def test_generate_consistency_token_flattened_error(): @pytest.mark.asyncio async def test_generate_consistency_token_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3279,7 +3716,7 @@ async def test_generate_consistency_token_flattened_async(): @pytest.mark.asyncio async def test_generate_consistency_token_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3300,7 +3737,7 @@ async def test_generate_consistency_token_flattened_error_async(): ) def test_check_consistency(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3332,7 +3769,7 @@ def test_check_consistency_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -3352,7 +3789,7 @@ async def test_check_consistency_async( request_type=bigtable_table_admin.CheckConsistencyRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3389,7 +3826,7 @@ async def test_check_consistency_async_from_dict(): def test_check_consistency_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3421,7 +3858,7 @@ def test_check_consistency_field_headers(): @pytest.mark.asyncio async def test_check_consistency_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3454,7 +3891,7 @@ async def test_check_consistency_field_headers_async(): def test_check_consistency_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3484,7 +3921,7 @@ def test_check_consistency_flattened(): def test_check_consistency_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3500,7 +3937,7 @@ def test_check_consistency_flattened_error(): @pytest.mark.asyncio async def test_check_consistency_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3535,7 +3972,7 @@ async def test_check_consistency_flattened_async(): @pytest.mark.asyncio async def test_check_consistency_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3557,7 +3994,7 @@ async def test_check_consistency_flattened_error_async(): ) def test_snapshot_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3584,7 +4021,7 @@ def test_snapshot_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -3602,7 +4039,7 @@ async def test_snapshot_table_async( request_type=bigtable_table_admin.SnapshotTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3634,7 +4071,7 @@ async def test_snapshot_table_async_from_dict(): def test_snapshot_table_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3664,7 +4101,7 @@ def test_snapshot_table_field_headers(): @pytest.mark.asyncio async def test_snapshot_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3695,7 +4132,7 @@ async def test_snapshot_table_field_headers_async(): def test_snapshot_table_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3731,7 +4168,7 @@ def test_snapshot_table_flattened(): def test_snapshot_table_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3749,7 +4186,7 @@ def test_snapshot_table_flattened_error(): @pytest.mark.asyncio async def test_snapshot_table_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -3790,7 +4227,7 @@ async def test_snapshot_table_flattened_async(): @pytest.mark.asyncio async def test_snapshot_table_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -3814,7 +4251,7 @@ async def test_snapshot_table_flattened_error_async(): ) def test_get_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3850,7 +4287,7 @@ def test_get_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -3868,7 +4305,7 @@ async def test_get_snapshot_async( request_type=bigtable_table_admin.GetSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3909,7 +4346,7 @@ async def test_get_snapshot_async_from_dict(): def test_get_snapshot_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3939,7 +4376,7 @@ def test_get_snapshot_field_headers(): @pytest.mark.asyncio async def test_get_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -3968,7 +4405,7 @@ async def test_get_snapshot_field_headers_async(): def test_get_snapshot_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -3992,7 +4429,7 @@ def test_get_snapshot_flattened(): def test_get_snapshot_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4007,7 +4444,7 @@ def test_get_snapshot_flattened_error(): @pytest.mark.asyncio async def test_get_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4034,7 +4471,7 @@ async def test_get_snapshot_flattened_async(): @pytest.mark.asyncio async def test_get_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4055,7 +4492,7 @@ async def test_get_snapshot_flattened_error_async(): ) def test_list_snapshots(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4085,7 +4522,7 @@ def test_list_snapshots_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -4103,7 +4540,7 @@ async def test_list_snapshots_async( request_type=bigtable_table_admin.ListSnapshotsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4138,7 +4575,7 @@ async def test_list_snapshots_async_from_dict(): def test_list_snapshots_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4168,7 +4605,7 @@ def test_list_snapshots_field_headers(): @pytest.mark.asyncio async def test_list_snapshots_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4199,7 +4636,7 @@ async def test_list_snapshots_field_headers_async(): def test_list_snapshots_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4223,7 +4660,7 @@ def test_list_snapshots_flattened(): def test_list_snapshots_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4238,7 +4675,7 @@ def test_list_snapshots_flattened_error(): @pytest.mark.asyncio async def test_list_snapshots_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4267,7 +4704,7 @@ async def test_list_snapshots_flattened_async(): @pytest.mark.asyncio async def test_list_snapshots_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4281,7 +4718,7 @@ async def test_list_snapshots_flattened_error_async(): def test_list_snapshots_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -4331,7 +4768,7 @@ def test_list_snapshots_pager(transport_name: str = "grpc"): def test_list_snapshots_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -4373,7 +4810,7 @@ def test_list_snapshots_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_snapshots_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within 
the gRPC stub, and fake the request. @@ -4423,7 +4860,7 @@ async def test_list_snapshots_async_pager(): @pytest.mark.asyncio async def test_list_snapshots_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4478,7 +4915,7 @@ async def test_list_snapshots_async_pages(): ) def test_delete_snapshot(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4505,7 +4942,7 @@ def test_delete_snapshot_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -4523,7 +4960,7 @@ async def test_delete_snapshot_async( request_type=bigtable_table_admin.DeleteSnapshotRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4553,7 +4990,7 @@ async def test_delete_snapshot_async_from_dict(): def test_delete_snapshot_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4583,7 +5020,7 @@ def test_delete_snapshot_field_headers(): @pytest.mark.asyncio async def test_delete_snapshot_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 
URI should be sent as @@ -4612,7 +5049,7 @@ async def test_delete_snapshot_field_headers_async(): def test_delete_snapshot_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4636,7 +5073,7 @@ def test_delete_snapshot_flattened(): def test_delete_snapshot_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4651,7 +5088,7 @@ def test_delete_snapshot_flattened_error(): @pytest.mark.asyncio async def test_delete_snapshot_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4678,7 +5115,7 @@ async def test_delete_snapshot_flattened_async(): @pytest.mark.asyncio async def test_delete_snapshot_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4699,7 +5136,7 @@ async def test_delete_snapshot_flattened_error_async(): ) def test_create_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4726,7 +5163,7 @@ def test_create_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -4744,7 +5181,7 @@ async def test_create_backup_async( request_type=bigtable_table_admin.CreateBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4776,7 +5213,7 @@ async def test_create_backup_async_from_dict(): def test_create_backup_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4806,7 +5243,7 @@ def test_create_backup_field_headers(): @pytest.mark.asyncio async def test_create_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -4837,7 +5274,7 @@ async def test_create_backup_field_headers_async(): def test_create_backup_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -4869,7 +5306,7 @@ def test_create_backup_flattened(): def test_create_backup_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4886,7 +5323,7 @@ def test_create_backup_flattened_error(): @pytest.mark.asyncio async def test_create_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -4923,7 +5360,7 @@ async def test_create_backup_flattened_async(): @pytest.mark.asyncio async def test_create_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -4946,7 +5383,7 @@ async def test_create_backup_flattened_error_async(): ) def test_get_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4984,7 +5421,7 @@ def test_get_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -5001,7 +5438,7 @@ async def test_get_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.GetBackupRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5044,7 +5481,7 @@ async def test_get_backup_async_from_dict(): def test_get_backup_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5074,7 +5511,7 @@ def test_get_backup_field_headers(): @pytest.mark.asyncio async def test_get_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5103,7 +5540,7 @@ async def test_get_backup_field_headers_async(): def test_get_backup_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5127,7 +5564,7 @@ def test_get_backup_flattened(): def test_get_backup_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5142,7 +5579,7 @@ def test_get_backup_flattened_error(): @pytest.mark.asyncio async def test_get_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5169,7 +5606,7 @@ async def test_get_backup_flattened_async(): @pytest.mark.asyncio async def test_get_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5190,7 +5627,7 @@ async def test_get_backup_flattened_error_async(): ) def test_update_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5228,7 +5665,7 @@ def test_update_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -5246,7 +5683,7 @@ async def test_update_backup_async( request_type=bigtable_table_admin.UpdateBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5289,7 +5726,7 @@ async def test_update_backup_async_from_dict(): def test_update_backup_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5319,7 +5756,7 @@ def test_update_backup_field_headers(): @pytest.mark.asyncio async def test_update_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5348,7 +5785,7 @@ async def test_update_backup_field_headers_async(): def test_update_backup_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5376,7 +5813,7 @@ def test_update_backup_flattened(): def test_update_backup_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5392,7 +5829,7 @@ def test_update_backup_flattened_error(): @pytest.mark.asyncio async def test_update_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5423,7 +5860,7 @@ async def test_update_backup_flattened_async(): @pytest.mark.asyncio async def test_update_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5445,7 +5882,7 @@ async def test_update_backup_flattened_error_async(): ) def test_delete_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5472,7 +5909,7 @@ def test_delete_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -5490,7 +5927,7 @@ async def test_delete_backup_async( request_type=bigtable_table_admin.DeleteBackupRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5520,7 +5957,7 @@ async def test_delete_backup_async_from_dict(): def test_delete_backup_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5550,7 +5987,7 @@ def test_delete_backup_field_headers(): @pytest.mark.asyncio async def test_delete_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5579,7 +6016,7 @@ async def test_delete_backup_field_headers_async(): def test_delete_backup_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5603,7 +6040,7 @@ def test_delete_backup_flattened(): def test_delete_backup_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5618,7 +6055,7 @@ def test_delete_backup_flattened_error(): @pytest.mark.asyncio async def test_delete_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5645,7 +6082,7 @@ async def test_delete_backup_flattened_async(): @pytest.mark.asyncio async def test_delete_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5666,7 +6103,7 @@ async def test_delete_backup_flattened_error_async(): ) def test_list_backups(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5696,7 +6133,7 @@ def test_list_backups_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -5714,7 +6151,7 @@ async def test_list_backups_async( request_type=bigtable_table_admin.ListBackupsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5749,7 +6186,7 @@ async def test_list_backups_async_from_dict(): def test_list_backups_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5779,7 +6216,7 @@ def test_list_backups_field_headers(): @pytest.mark.asyncio async def test_list_backups_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -5810,7 +6247,7 @@ async def test_list_backups_field_headers_async(): def test_list_backups_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -5834,7 +6271,7 @@ def test_list_backups_flattened(): def test_list_backups_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5849,7 +6286,7 @@ def test_list_backups_flattened_error(): @pytest.mark.asyncio async def test_list_backups_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -5878,7 +6315,7 @@ async def test_list_backups_flattened_async(): @pytest.mark.asyncio async def test_list_backups_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -5892,7 +6329,7 @@ async def test_list_backups_flattened_error_async(): def test_list_backups_pager(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -5942,7 +6379,7 @@ def test_list_backups_pager(transport_name: str = "grpc"): def test_list_backups_pages(transport_name: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport_name, ) @@ -5984,7 +6421,7 @@ def test_list_backups_pages(transport_name: str = "grpc"): @pytest.mark.asyncio async def test_list_backups_async_pager(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake 
the request. @@ -6034,7 +6471,7 @@ async def test_list_backups_async_pager(): @pytest.mark.asyncio async def test_list_backups_async_pages(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6089,7 +6526,7 @@ async def test_list_backups_async_pages(): ) def test_restore_table(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6116,7 +6553,7 @@ def test_restore_table_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -6134,7 +6571,7 @@ async def test_restore_table_async( request_type=bigtable_table_admin.RestoreTableRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6166,7 +6603,7 @@ async def test_restore_table_async_from_dict(): def test_restore_table_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6196,7 +6633,7 @@ def test_restore_table_field_headers(): @pytest.mark.asyncio async def test_restore_table_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6234,7 +6671,7 @@ 
async def test_restore_table_field_headers_async(): ) def test_copy_backup(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6261,7 +6698,7 @@ def test_copy_backup_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -6278,7 +6715,7 @@ async def test_copy_backup_async( transport: str = "grpc_asyncio", request_type=bigtable_table_admin.CopyBackupRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6310,7 +6747,7 @@ async def test_copy_backup_async_from_dict(): def test_copy_backup_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6340,7 +6777,7 @@ def test_copy_backup_field_headers(): @pytest.mark.asyncio async def test_copy_backup_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6371,7 +6808,7 @@ async def test_copy_backup_field_headers_async(): def test_copy_backup_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -6407,7 +6844,7 @@ def test_copy_backup_flattened(): def test_copy_backup_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -6425,7 +6862,7 @@ def test_copy_backup_flattened_error(): @pytest.mark.asyncio async def test_copy_backup_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6466,7 +6903,7 @@ async def test_copy_backup_flattened_async(): @pytest.mark.asyncio async def test_copy_backup_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -6490,7 +6927,7 @@ async def test_copy_backup_flattened_error_async(): ) def test_get_iam_policy(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6522,7 +6959,7 @@ def test_get_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -6539,7 +6976,7 @@ async def test_get_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6576,7 +7013,7 @@ async def test_get_iam_policy_async_from_dict(): def test_get_iam_policy_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6606,7 +7043,7 @@ def test_get_iam_policy_field_headers(): @pytest.mark.asyncio async def test_get_iam_policy_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6635,7 +7072,7 @@ async def test_get_iam_policy_field_headers_async(): def test_get_iam_policy_from_dict_foreign(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call: @@ -6652,7 +7089,7 @@ def test_get_iam_policy_from_dict_foreign(): def test_get_iam_policy_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -6676,7 +7113,7 @@ def test_get_iam_policy_flattened(): def test_get_iam_policy_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -6691,7 +7128,7 @@ def test_get_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6718,7 +7155,7 @@ async def test_get_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_get_iam_policy_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -6739,7 +7176,7 @@ async def test_get_iam_policy_flattened_error_async(): ) def test_set_iam_policy(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6771,7 +7208,7 @@ def test_set_iam_policy_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -6788,7 +7225,7 @@ async def test_set_iam_policy_async( transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -6825,7 +7262,7 @@ async def test_set_iam_policy_async_from_dict(): def test_set_iam_policy_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6855,7 +7292,7 @@ def test_set_iam_policy_field_headers(): @pytest.mark.asyncio async def test_set_iam_policy_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -6884,7 +7321,7 @@ async def test_set_iam_policy_field_headers_async(): def test_set_iam_policy_from_dict_foreign(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call: @@ -6902,7 +7339,7 @@ def test_set_iam_policy_from_dict_foreign(): def test_set_iam_policy_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -6926,7 +7363,7 @@ def test_set_iam_policy_flattened(): def test_set_iam_policy_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -6941,7 +7378,7 @@ def test_set_iam_policy_flattened_error(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -6968,7 +7405,7 @@ async def test_set_iam_policy_flattened_async(): @pytest.mark.asyncio async def test_set_iam_policy_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -6989,7 +7426,7 @@ async def test_set_iam_policy_flattened_error_async(): ) def test_test_iam_permissions(request_type, transport: str = "grpc"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7021,7 +7458,7 @@ def test_test_iam_permissions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -7041,7 +7478,7 @@ async def test_test_iam_permissions_async( request_type=iam_policy_pb2.TestIamPermissionsRequest, ): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7078,7 +7515,7 @@ async def test_test_iam_permissions_async_from_dict(): def test_test_iam_permissions_field_headers(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -7110,7 +7547,7 @@ def test_test_iam_permissions_field_headers(): @pytest.mark.asyncio async def test_test_iam_permissions_field_headers_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -7143,7 +7580,7 @@ async def test_test_iam_permissions_field_headers_async(): def test_test_iam_permissions_from_dict_foreign(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. with mock.patch.object( @@ -7162,7 +7599,7 @@ def test_test_iam_permissions_from_dict_foreign(): def test_test_iam_permissions_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -7192,7 +7629,7 @@ def test_test_iam_permissions_flattened(): def test_test_iam_permissions_flattened_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -7208,7 +7645,7 @@ def test_test_iam_permissions_flattened_error(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -7243,7 +7680,7 @@ async def test_test_iam_permissions_flattened_async(): @pytest.mark.asyncio async def test_test_iam_permissions_flattened_error_async(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -7265,7 +7702,7 @@ async def test_test_iam_permissions_flattened_error_async(): ) def test_create_table_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7321,7 +7758,7 @@ def test_create_table_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7331,7 +7768,7 @@ def test_create_table_rest_required_fields( jsonified_request["tableId"] = "table_id_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() 
).create_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7342,7 +7779,7 @@ def test_create_table_rest_required_fields( assert jsonified_request["tableId"] == "table_id_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -7385,7 +7822,7 @@ def test_create_table_rest_required_fields( def test_create_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.create_table._get_unset_required_fields({}) @@ -7404,7 +7841,7 @@ def test_create_table_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_create_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -7460,7 +7897,7 @@ def test_create_table_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.CreateTableRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7482,7 +7919,7 @@ def test_create_table_rest_bad_request( def test_create_table_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7525,7 +7962,7 @@ def test_create_table_rest_flattened(): def test_create_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7542,7 +7979,7 @@ def test_create_table_rest_flattened_error(transport: str = "rest"): def test_create_table_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -7555,7 +7992,7 @@ def test_create_table_rest_error(): ) def test_create_table_from_snapshot_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7603,7 +8040,7 @@ def test_create_table_from_snapshot_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7614,7 +8051,7 @@ def test_create_table_from_snapshot_rest_required_fields( jsonified_request["sourceSnapshot"] = "source_snapshot_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_table_from_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7627,7 +8064,7 @@ def test_create_table_from_snapshot_rest_required_fields( assert jsonified_request["sourceSnapshot"] == "source_snapshot_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -7667,7 +8104,7 @@ def test_create_table_from_snapshot_rest_required_fields( def test_create_table_from_snapshot_rest_unset_required_fields(): 
transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.create_table_from_snapshot._get_unset_required_fields({}) @@ -7686,7 +8123,7 @@ def test_create_table_from_snapshot_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_create_table_from_snapshot_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -7747,7 +8184,7 @@ def test_create_table_from_snapshot_rest_bad_request( request_type=bigtable_table_admin.CreateTableFromSnapshotRequest, ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7769,7 +8206,7 @@ def test_create_table_from_snapshot_rest_bad_request( def test_create_table_from_snapshot_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7811,7 +8248,7 @@ def test_create_table_from_snapshot_rest_flattened(): def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -7828,7 +8265,7 @@ def test_create_table_from_snapshot_rest_flattened_error(transport: str = "rest" def test_create_table_from_snapshot_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ 
-7841,7 +8278,7 @@ def test_create_table_from_snapshot_rest_error(): ) def test_list_tables_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -7892,7 +8329,7 @@ def test_list_tables_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_tables._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -7901,7 +8338,7 @@ def test_list_tables_rest_required_fields( jsonified_request["parent"] = "parent_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_tables._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set( @@ -7918,7 +8355,7 @@ def test_list_tables_rest_required_fields( assert jsonified_request["parent"] == "parent_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -7960,7 +8397,7 @@ def test_list_tables_rest_required_fields( def test_list_tables_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.list_tables._get_unset_required_fields({}) @@ -7979,7 +8416,7 @@ def test_list_tables_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_list_tables_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -8037,7 +8474,7 @@ def test_list_tables_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.ListTablesRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8059,7 +8496,7 @@ def test_list_tables_rest_bad_request( def test_list_tables_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8100,7 +8537,7 @@ def test_list_tables_rest_flattened(): def test_list_tables_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), 
transport=transport, ) @@ -8115,7 +8552,7 @@ def test_list_tables_rest_flattened_error(transport: str = "rest"): def test_list_tables_rest_pager(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8185,7 +8622,7 @@ def test_list_tables_rest_pager(transport: str = "rest"): ) def test_get_table_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8240,7 +8677,7 @@ def test_get_table_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -8249,7 +8686,7 @@ def test_get_table_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_table._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set(("view",)) @@ -8260,7 +8697,7 @@ def test_get_table_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -8302,7 +8739,7 @@ def test_get_table_rest_required_fields( def test_get_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.get_table._get_unset_required_fields({}) @@ -8312,7 +8749,7 @@ def test_get_table_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_get_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -8368,7 +8805,7 @@ def test_get_table_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.GetTableRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8390,7 +8827,7 @@ def test_get_table_rest_bad_request( def test_get_table_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8431,7 +8868,7 @@ def test_get_table_rest_flattened(): def test_get_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), 
transport=transport, ) @@ -8446,7 +8883,7 @@ def test_get_table_rest_flattened_error(transport: str = "rest"): def test_get_table_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -8459,7 +8896,7 @@ def test_get_table_rest_error(): ) def test_update_table_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8591,14 +9028,14 @@ def test_update_table_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).update_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).update_table._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set(("update_mask",)) @@ -8607,7 +9044,7 @@ def test_update_table_rest_required_fields( # verify required fields with non-default values are left alone client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -8647,7 +9084,7 @@ def test_update_table_rest_required_fields( def test_update_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.update_table._get_unset_required_fields({}) @@ -8665,7 +9102,7 @@ def test_update_table_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_update_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -8725,7 +9162,7 @@ def test_update_table_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.UpdateTableRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8749,7 +9186,7 @@ def test_update_table_rest_bad_request( def test_update_table_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8792,7 +9229,7 @@ def test_update_table_rest_flattened(): def test_update_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -8808,7 +9245,7 @@ def test_update_table_rest_flattened_error(transport: str = "rest"): def test_update_table_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -8821,7 +9258,7 @@ def test_update_table_rest_error(): ) def test_delete_table_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -8867,7 +9304,7 @@ def test_delete_table_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -8876,7 +9313,7 @@ def test_delete_table_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -8885,7 +9322,7 @@ def test_delete_table_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -8924,7 +9361,7 @@ def test_delete_table_rest_required_fields( def test_delete_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = 
transport.delete_table._get_unset_required_fields({}) @@ -8934,7 +9371,7 @@ def test_delete_table_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_delete_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -8984,7 +9421,7 @@ def test_delete_table_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.DeleteTableRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9006,7 +9443,7 @@ def test_delete_table_rest_bad_request( def test_delete_table_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9045,7 +9482,7 @@ def test_delete_table_rest_flattened(): def test_delete_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9060,7 +9497,7 @@ def test_delete_table_rest_flattened_error(transport: str = "rest"): def test_delete_table_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -9073,7 +9510,7 @@ def test_delete_table_rest_error(): ) def test_undelete_table_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9119,7 +9556,7 @@ def 
test_undelete_table_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).undelete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9128,7 +9565,7 @@ def test_undelete_table_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).undelete_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9137,7 +9574,7 @@ def test_undelete_table_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -9177,7 +9614,7 @@ def test_undelete_table_rest_required_fields( def test_undelete_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.undelete_table._get_unset_required_fields({}) @@ -9187,7 +9624,7 @@ def test_undelete_table_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_undelete_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -9247,7 +9684,7 @@ def test_undelete_table_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.UndeleteTableRequest ): client = BigtableTableAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9269,7 +9706,7 @@ def test_undelete_table_rest_bad_request( def test_undelete_table_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9309,7 +9746,7 @@ def test_undelete_table_rest_flattened(): def test_undelete_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9324,7 +9761,7 @@ def test_undelete_table_rest_flattened_error(transport: str = "rest"): def test_undelete_table_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -9337,7 +9774,7 @@ def test_undelete_table_rest_error(): ) def test_modify_column_families_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9392,7 +9829,7 @@ def test_modify_column_families_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).modify_column_families._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9401,7 +9838,7 @@ def test_modify_column_families_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).modify_column_families._get_unset_required_fields(jsonified_request) 
jsonified_request.update(unset_fields) @@ -9410,7 +9847,7 @@ def test_modify_column_families_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -9453,7 +9890,7 @@ def test_modify_column_families_rest_required_fields( def test_modify_column_families_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.modify_column_families._get_unset_required_fields({}) @@ -9471,7 +9908,7 @@ def test_modify_column_families_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_modify_column_families_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -9528,7 +9965,7 @@ def test_modify_column_families_rest_bad_request( request_type=bigtable_table_admin.ModifyColumnFamiliesRequest, ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9550,7 +9987,7 @@ def test_modify_column_families_rest_bad_request( def test_modify_column_families_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9597,7 +10034,7 @@ def test_modify_column_families_rest_flattened(): def test_modify_column_families_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9617,7 +10054,7 @@ def test_modify_column_families_rest_flattened_error(transport: str = "rest"): def test_modify_column_families_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -9630,7 +10067,7 @@ def test_modify_column_families_rest_error(): ) def test_drop_row_range_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9676,7 +10113,7 @@ def test_drop_row_range_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).drop_row_range._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9685,7 +10122,7 @@ def test_drop_row_range_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).drop_row_range._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9694,7 +10131,7 @@ def test_drop_row_range_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -9734,7 +10171,7 @@ def test_drop_row_range_rest_required_fields( def test_drop_row_range_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - 
credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.drop_row_range._get_unset_required_fields({}) @@ -9744,7 +10181,7 @@ def test_drop_row_range_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_drop_row_range_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -9794,7 +10231,7 @@ def test_drop_row_range_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.DropRowRangeRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -9816,7 +10253,7 @@ def test_drop_row_range_rest_bad_request( def test_drop_row_range_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -9829,7 +10266,7 @@ def test_drop_row_range_rest_error(): ) def test_generate_consistency_token_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -9882,7 +10319,7 @@ def test_generate_consistency_token_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).generate_consistency_token._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9891,7 +10328,7 @@ def test_generate_consistency_token_rest_required_fields( 
jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).generate_consistency_token._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -9900,7 +10337,7 @@ def test_generate_consistency_token_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -9945,7 +10382,7 @@ def test_generate_consistency_token_rest_required_fields( def test_generate_consistency_token_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.generate_consistency_token._get_unset_required_fields({}) @@ -9955,7 +10392,7 @@ def test_generate_consistency_token_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_generate_consistency_token_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -10016,7 +10453,7 @@ def test_generate_consistency_token_rest_bad_request( request_type=bigtable_table_admin.GenerateConsistencyTokenRequest, ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10038,7 +10475,7 @@ def test_generate_consistency_token_rest_bad_request( def test_generate_consistency_token_rest_flattened(): client = BigtableTableAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10082,7 +10519,7 @@ def test_generate_consistency_token_rest_flattened(): def test_generate_consistency_token_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10097,7 +10534,7 @@ def test_generate_consistency_token_rest_flattened_error(transport: str = "rest" def test_generate_consistency_token_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -10110,7 +10547,7 @@ def test_generate_consistency_token_rest_error(): ) def test_check_consistency_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10162,7 +10599,7 @@ def test_check_consistency_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).check_consistency._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -10172,7 +10609,7 @@ def test_check_consistency_rest_required_fields( jsonified_request["consistencyToken"] = "consistency_token_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).check_consistency._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -10183,7 +10620,7 @@ def test_check_consistency_rest_required_fields( assert jsonified_request["consistencyToken"] == "consistency_token_value" client = 
BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -10228,7 +10665,7 @@ def test_check_consistency_rest_required_fields( def test_check_consistency_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.check_consistency._get_unset_required_fields({}) @@ -10246,7 +10683,7 @@ def test_check_consistency_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_check_consistency_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -10306,7 +10743,7 @@ def test_check_consistency_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.CheckConsistencyRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10328,7 +10765,7 @@ def test_check_consistency_rest_bad_request( def test_check_consistency_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10371,7 +10808,7 @@ def test_check_consistency_rest_flattened(): def test_check_consistency_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10387,7 +10824,7 @@ def 
test_check_consistency_rest_flattened_error(transport: str = "rest"): def test_check_consistency_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -10400,7 +10837,7 @@ def test_check_consistency_rest_error(): ) def test_snapshot_table_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10448,7 +10885,7 @@ def test_snapshot_table_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).snapshot_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -10459,7 +10896,7 @@ def test_snapshot_table_rest_required_fields( jsonified_request["snapshotId"] = "snapshot_id_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).snapshot_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -10472,7 +10909,7 @@ def test_snapshot_table_rest_required_fields( assert jsonified_request["snapshotId"] == "snapshot_id_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -10512,7 +10949,7 @@ def test_snapshot_table_rest_required_fields( def test_snapshot_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.snapshot_table._get_unset_required_fields({}) @@ -10531,7 
+10968,7 @@ def test_snapshot_table_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_snapshot_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -10591,7 +11028,7 @@ def test_snapshot_table_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.SnapshotTableRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10613,7 +11050,7 @@ def test_snapshot_table_rest_bad_request( def test_snapshot_table_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10656,7 +11093,7 @@ def test_snapshot_table_rest_flattened(): def test_snapshot_table_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10674,7 +11111,7 @@ def test_snapshot_table_rest_flattened_error(transport: str = "rest"): def test_snapshot_table_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -10687,7 +11124,7 @@ def test_snapshot_table_rest_error(): ) def test_get_snapshot_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10746,7 +11183,7 @@ def test_get_snapshot_rest_required_fields( # verify 
fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -10755,7 +11192,7 @@ def test_get_snapshot_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -10764,7 +11201,7 @@ def test_get_snapshot_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -10806,7 +11243,7 @@ def test_get_snapshot_rest_required_fields( def test_get_snapshot_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.get_snapshot._get_unset_required_fields({}) @@ -10816,7 +11253,7 @@ def test_get_snapshot_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_get_snapshot_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -10872,7 +11309,7 @@ def test_get_snapshot_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.GetSnapshotRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10896,7 +11333,7 @@ def test_get_snapshot_rest_bad_request( def test_get_snapshot_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -10940,7 +11377,7 @@ def test_get_snapshot_rest_flattened(): def test_get_snapshot_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -10955,7 +11392,7 @@ def test_get_snapshot_rest_flattened_error(transport: str = "rest"): def test_get_snapshot_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -10968,7 +11405,7 @@ def test_get_snapshot_rest_error(): ) def test_list_snapshots_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11019,7 +11456,7 @@ def test_list_snapshots_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_snapshots._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11028,7 +11465,7 @@ def test_list_snapshots_rest_required_fields( jsonified_request["parent"] = "parent_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_snapshots._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set( @@ -11044,7 +11481,7 @@ def test_list_snapshots_rest_required_fields( assert jsonified_request["parent"] == "parent_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -11086,7 +11523,7 @@ def test_list_snapshots_rest_required_fields( def test_list_snapshots_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.list_snapshots._get_unset_required_fields({}) @@ -11104,7 +11541,7 @@ def test_list_snapshots_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_list_snapshots_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -11162,7 +11599,7 @@ def test_list_snapshots_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.ListSnapshotsRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11184,7 +11621,7 @@ def test_list_snapshots_rest_bad_request( def test_list_snapshots_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11228,7 +11665,7 @@ def test_list_snapshots_rest_flattened(): def test_list_snapshots_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11243,7 +11680,7 @@ def test_list_snapshots_rest_flattened_error(transport: str = "rest"): def test_list_snapshots_rest_pager(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11315,7 +11752,7 @@ def test_list_snapshots_rest_pager(transport: str = "rest"): ) def test_delete_snapshot_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11363,7 +11800,7 @@ def test_delete_snapshot_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11372,7 +11809,7 @@ def test_delete_snapshot_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_snapshot._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11381,7 +11818,7 @@ def test_delete_snapshot_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -11420,7 +11857,7 @@ def test_delete_snapshot_rest_required_fields( def test_delete_snapshot_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + 
credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.delete_snapshot._get_unset_required_fields({}) @@ -11430,7 +11867,7 @@ def test_delete_snapshot_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_delete_snapshot_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -11480,7 +11917,7 @@ def test_delete_snapshot_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.DeleteSnapshotRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11504,7 +11941,7 @@ def test_delete_snapshot_rest_bad_request( def test_delete_snapshot_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11546,7 +11983,7 @@ def test_delete_snapshot_rest_flattened(): def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11561,7 +11998,7 @@ def test_delete_snapshot_rest_flattened_error(transport: str = "rest"): def test_delete_snapshot_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -11574,7 +12011,7 @@ def test_delete_snapshot_rest_error(): ) def test_create_backup_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11713,7 +12150,7 @@ def test_create_backup_rest_required_fields( assert "backupId" not in jsonified_request unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -11725,7 +12162,7 @@ def test_create_backup_rest_required_fields( jsonified_request["backupId"] = "backup_id_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).create_backup._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. assert not set(unset_fields) - set(("backup_id",)) @@ -11738,7 +12175,7 @@ def test_create_backup_rest_required_fields( assert jsonified_request["backupId"] == "backup_id_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -11784,7 +12221,7 @@ def test_create_backup_rest_required_fields( def test_create_backup_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.create_backup._get_unset_required_fields({}) @@ -11803,7 +12240,7 @@ def test_create_backup_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_create_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else 
transports.BigtableTableAdminRestInterceptor(), @@ -11863,7 +12300,7 @@ def test_create_backup_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.CreateBackupRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11885,7 +12322,7 @@ def test_create_backup_rest_bad_request( def test_create_backup_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -11929,7 +12366,7 @@ def test_create_backup_rest_flattened(): def test_create_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -11946,7 +12383,7 @@ def test_create_backup_rest_flattened_error(transport: str = "rest"): def test_create_backup_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -11959,7 +12396,7 @@ def test_create_backup_rest_error(): ) def test_get_backup_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -12020,7 +12457,7 @@ def test_get_backup_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -12029,7 +12466,7 @@ def test_get_backup_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = 
transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -12038,7 +12475,7 @@ def test_get_backup_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -12080,7 +12517,7 @@ def test_get_backup_rest_required_fields( def test_get_backup_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.get_backup._get_unset_required_fields({}) @@ -12090,7 +12527,7 @@ def test_get_backup_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_get_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -12146,7 +12583,7 @@ def test_get_backup_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.GetBackupRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -12170,7 +12607,7 @@ def test_get_backup_rest_bad_request( def test_get_backup_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -12214,7 +12651,7 @@ def test_get_backup_rest_flattened(): def 
test_get_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -12229,7 +12666,7 @@ def test_get_backup_rest_flattened_error(transport: str = "rest"): def test_get_backup_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -12242,7 +12679,7 @@ def test_get_backup_rest_error(): ) def test_update_backup_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -12395,14 +12832,14 @@ def test_update_backup_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).update_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) # verify required fields with default values are now present unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).update_backup._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set(("update_mask",)) @@ -12411,7 +12848,7 @@ def test_update_backup_rest_required_fields( # verify required fields with non-default values are left alone client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -12454,7 +12891,7 @@ def test_update_backup_rest_required_fields( def test_update_backup_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.update_backup._get_unset_required_fields({}) @@ -12472,7 +12909,7 @@ def test_update_backup_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_update_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -12528,7 +12965,7 @@ def test_update_backup_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.UpdateBackupRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -12554,7 +12991,7 @@ def test_update_backup_rest_bad_request( def test_update_backup_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -12601,7 +13038,7 @@ def test_update_backup_rest_flattened(): def test_update_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -12617,7 +13054,7 @@ def test_update_backup_rest_flattened_error(transport: str = "rest"): def test_update_backup_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -12630,7 +13067,7 @@ def test_update_backup_rest_error(): ) def test_delete_backup_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -12678,7 +13115,7 @@ def test_delete_backup_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -12687,7 +13124,7 @@ def test_delete_backup_rest_required_fields( jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).delete_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -12696,7 +13133,7 @@ def test_delete_backup_rest_required_fields( assert jsonified_request["name"] == "name_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -12735,7 +13172,7 @@ def test_delete_backup_rest_required_fields( def test_delete_backup_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + 
credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.delete_backup._get_unset_required_fields({}) @@ -12745,7 +13182,7 @@ def test_delete_backup_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_delete_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -12795,7 +13232,7 @@ def test_delete_backup_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.DeleteBackupRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -12819,7 +13256,7 @@ def test_delete_backup_rest_bad_request( def test_delete_backup_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -12861,7 +13298,7 @@ def test_delete_backup_rest_flattened(): def test_delete_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -12876,7 +13313,7 @@ def test_delete_backup_rest_flattened_error(transport: str = "rest"): def test_delete_backup_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -12889,7 +13326,7 @@ def test_delete_backup_rest_error(): ) def test_list_backups_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -12940,7 +13377,7 @@ def test_list_backups_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_backups._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -12949,7 +13386,7 @@ def test_list_backups_rest_required_fields( jsonified_request["parent"] = "parent_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).list_backups._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. assert not set(unset_fields) - set( @@ -12967,7 +13404,7 @@ def test_list_backups_rest_required_fields( assert jsonified_request["parent"] == "parent_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -13009,7 +13446,7 @@ def test_list_backups_rest_required_fields( def test_list_backups_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.list_backups._get_unset_required_fields({}) @@ -13029,7 +13466,7 @@ def test_list_backups_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_list_backups_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -13087,7 +13524,7 @@ 
def test_list_backups_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.ListBackupsRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -13109,7 +13546,7 @@ def test_list_backups_rest_bad_request( def test_list_backups_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -13153,7 +13590,7 @@ def test_list_backups_rest_flattened(): def test_list_backups_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -13168,7 +13605,7 @@ def test_list_backups_rest_flattened_error(transport: str = "rest"): def test_list_backups_rest_pager(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -13240,7 +13677,7 @@ def test_list_backups_rest_pager(transport: str = "rest"): ) def test_restore_table_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -13287,7 +13724,7 @@ def test_restore_table_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).restore_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -13297,7 +13734,7 @@ def test_restore_table_rest_required_fields( jsonified_request["tableId"] = "table_id_value" unset_fields = transport_class( - 
credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).restore_table._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -13308,7 +13745,7 @@ def test_restore_table_rest_required_fields( assert jsonified_request["tableId"] == "table_id_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -13348,7 +13785,7 @@ def test_restore_table_rest_required_fields( def test_restore_table_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.restore_table._get_unset_required_fields({}) @@ -13366,7 +13803,7 @@ def test_restore_table_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_restore_table_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -13426,7 +13863,7 @@ def test_restore_table_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.RestoreTableRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -13448,7 +13885,7 @@ def test_restore_table_rest_bad_request( def test_restore_table_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -13461,7 +13898,7 @@ def 
test_restore_table_rest_error(): ) def test_copy_backup_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -13509,7 +13946,7 @@ def test_copy_backup_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).copy_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -13520,7 +13957,7 @@ def test_copy_backup_rest_required_fields( jsonified_request["sourceBackup"] = "source_backup_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).copy_backup._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -13533,7 +13970,7 @@ def test_copy_backup_rest_required_fields( assert jsonified_request["sourceBackup"] == "source_backup_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -13573,7 +14010,7 @@ def test_copy_backup_rest_required_fields( def test_copy_backup_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.copy_backup._get_unset_required_fields({}) @@ -13593,7 +14030,7 @@ def test_copy_backup_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_copy_backup_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -13653,7 +14090,7 @@ def test_copy_backup_rest_bad_request( transport: str = "rest", request_type=bigtable_table_admin.CopyBackupRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -13675,7 +14112,7 @@ def test_copy_backup_rest_bad_request( def test_copy_backup_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -13720,7 +14157,7 @@ def test_copy_backup_rest_flattened(): def test_copy_backup_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -13738,7 +14175,7 @@ def test_copy_backup_rest_flattened_error(transport: str = "rest"): def test_copy_backup_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -13751,7 +14188,7 @@ def test_copy_backup_rest_error(): ) def test_get_iam_policy_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -13802,7 +14239,7 @@ def test_get_iam_policy_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -13811,7 +14248,7 @@ def 
test_get_iam_policy_rest_required_fields( jsonified_request["resource"] = "resource_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).get_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -13820,7 +14257,7 @@ def test_get_iam_policy_rest_required_fields( assert jsonified_request["resource"] == "resource_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -13861,7 +14298,7 @@ def test_get_iam_policy_rest_required_fields( def test_get_iam_policy_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.get_iam_policy._get_unset_required_fields({}) @@ -13871,7 +14308,7 @@ def test_get_iam_policy_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_get_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -13925,7 +14362,7 @@ def test_get_iam_policy_rest_bad_request( transport: str = "rest", request_type=iam_policy_pb2.GetIamPolicyRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -13947,7 +14384,7 @@ def test_get_iam_policy_rest_bad_request( def test_get_iam_policy_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -13989,7 +14426,7 @@ def test_get_iam_policy_rest_flattened(): def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -14004,7 +14441,7 @@ def test_get_iam_policy_rest_flattened_error(transport: str = "rest"): def test_get_iam_policy_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -14017,7 +14454,7 @@ def test_get_iam_policy_rest_error(): ) def test_set_iam_policy_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -14068,7 +14505,7 @@ def test_set_iam_policy_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).set_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -14077,7 +14514,7 @@ def test_set_iam_policy_rest_required_fields( jsonified_request["resource"] = "resource_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).set_iam_policy._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -14086,7 +14523,7 @@ def test_set_iam_policy_rest_required_fields( assert jsonified_request["resource"] == "resource_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = 
request_type(**request_init) @@ -14127,7 +14564,7 @@ def test_set_iam_policy_rest_required_fields( def test_set_iam_policy_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.set_iam_policy._get_unset_required_fields({}) @@ -14145,7 +14582,7 @@ def test_set_iam_policy_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_set_iam_policy_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -14199,7 +14636,7 @@ def test_set_iam_policy_rest_bad_request( transport: str = "rest", request_type=iam_policy_pb2.SetIamPolicyRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -14221,7 +14658,7 @@ def test_set_iam_policy_rest_bad_request( def test_set_iam_policy_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -14263,7 +14700,7 @@ def test_set_iam_policy_rest_flattened(): def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -14278,7 +14715,7 @@ def test_set_iam_policy_rest_flattened_error(transport: str = "rest"): def test_set_iam_policy_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -14291,7 +14728,7 @@ def test_set_iam_policy_rest_error(): ) def test_test_iam_permissions_rest(request_type): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -14341,7 +14778,7 @@ def test_test_iam_permissions_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).test_iam_permissions._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -14351,7 +14788,7 @@ def test_test_iam_permissions_rest_required_fields( jsonified_request["permissions"] = "permissions_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).test_iam_permissions._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -14362,7 +14799,7 @@ def test_test_iam_permissions_rest_required_fields( assert jsonified_request["permissions"] == "permissions_value" client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -14403,7 +14840,7 @@ def test_test_iam_permissions_rest_required_fields( def test_test_iam_permissions_rest_unset_required_fields(): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.test_iam_permissions._get_unset_required_fields({}) @@ -14421,7 +14858,7 @@ def test_test_iam_permissions_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def 
test_test_iam_permissions_rest_interceptors(null_interceptor): transport = transports.BigtableTableAdminRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableTableAdminRestInterceptor(), @@ -14477,7 +14914,7 @@ def test_test_iam_permissions_rest_bad_request( transport: str = "rest", request_type=iam_policy_pb2.TestIamPermissionsRequest ): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -14499,7 +14936,7 @@ def test_test_iam_permissions_rest_bad_request( def test_test_iam_permissions_rest_flattened(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -14542,7 +14979,7 @@ def test_test_iam_permissions_rest_flattened(): def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -14558,24 +14995,24 @@ def test_test_iam_permissions_rest_flattened_error(transport: str = "rest"): def test_test_iam_permissions_rest_error(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. 
transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) with pytest.raises(ValueError): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) with pytest.raises(ValueError): client = BigtableTableAdminClient( @@ -14585,7 +15022,7 @@ def test_credentials_transport_error(): # It is an error to provide an api_key and a transport instance. transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) options = client_options.ClientOptions() options.api_key = "api_key" @@ -14596,16 +15033,17 @@ def test_credentials_transport_error(): ) # It is an error to provide an api_key and a credential. - options = mock.Mock() + options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = BigtableTableAdminClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # It is an error to provide scopes and a transport instance. transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) with pytest.raises(ValueError): client = BigtableTableAdminClient( @@ -14617,7 +15055,7 @@ def test_credentials_transport_error(): def test_transport_instance(): # A client may be instantiated with a custom transport instance. 
transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) client = BigtableTableAdminClient(transport=transport) assert client.transport is transport @@ -14626,13 +15064,13 @@ def test_transport_instance(): def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.BigtableTableAdminGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) channel = transport.grpc_channel assert channel transport = transports.BigtableTableAdminGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) channel = transport.grpc_channel assert channel @@ -14649,7 +15087,7 @@ def test_transport_get_channel(): def test_transport_adc(transport_class): # Test default credentials are used if not provided. with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport_class() adc.assert_called_once() @@ -14663,7 +15101,7 @@ def test_transport_adc(transport_class): ) def test_transport_kind(transport_name): transport = BigtableTableAdminClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) assert transport.kind == transport_name @@ -14671,7 +15109,7 @@ def test_transport_kind(transport_name): def test_transport_grpc_default(): # A client should use the gRPC transport by default. 
client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) assert isinstance( client.transport, @@ -14683,7 +15121,7 @@ def test_bigtable_table_admin_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.BigtableTableAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), credentials_file="credentials.json", ) @@ -14695,7 +15133,7 @@ def test_bigtable_table_admin_base_transport(): ) as Transport: Transport.return_value = None transport = transports.BigtableTableAdminTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Every method on the transport should just blindly @@ -14756,7 +15194,7 @@ def test_bigtable_table_admin_base_transport_with_credentials_file(): "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + load_creds.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport = transports.BigtableTableAdminTransport( credentials_file="credentials.json", quota_project_id="octopus", @@ -14782,7 +15220,7 @@ def test_bigtable_table_admin_base_transport_with_adc(): "google.cloud.bigtable_admin_v2.services.bigtable_table_admin.transports.BigtableTableAdminTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport = transports.BigtableTableAdminTransport() adc.assert_called_once() @@ -14790,7 +15228,7 @@ def 
test_bigtable_table_admin_base_transport_with_adc(): def test_bigtable_table_admin_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) BigtableTableAdminClient() adc.assert_called_once_with( scopes=None, @@ -14817,7 +15255,7 @@ def test_bigtable_table_admin_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], @@ -14871,7 +15309,7 @@ def test_bigtable_table_admin_transport_create_channel(transport_class, grpc_hel ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: - creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) @@ -14908,7 +15346,7 @@ def test_bigtable_table_admin_transport_create_channel(transport_class, grpc_hel def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( transport_class, ): - cred = ga_credentials.AnonymousCredentials() + cred = _AnonymousCredentialsWithUniverseDomain() # Check ssl_channel_credentials is used if provided. 
with mock.patch.object(transport_class, "create_channel") as mock_create_channel: @@ -14946,7 +15384,7 @@ def test_bigtable_table_admin_grpc_transport_client_cert_source_for_mtls( def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls(): - cred = ga_credentials.AnonymousCredentials() + cred = _AnonymousCredentialsWithUniverseDomain() with mock.patch( "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" ) as mock_configure_mtls_channel: @@ -14958,7 +15396,7 @@ def test_bigtable_table_admin_http_transport_client_cert_source_for_mtls(): def test_bigtable_table_admin_rest_lro_client(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) transport = client.transport @@ -14983,7 +15421,7 @@ def test_bigtable_table_admin_rest_lro_client(): ) def test_bigtable_table_admin_host_no_port(transport_name): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com" ), @@ -15006,7 +15444,7 @@ def test_bigtable_table_admin_host_no_port(transport_name): ) def test_bigtable_table_admin_host_with_port(transport_name): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_options=client_options.ClientOptions( api_endpoint="bigtableadmin.googleapis.com:8000" ), @@ -15026,8 +15464,8 @@ def test_bigtable_table_admin_host_with_port(transport_name): ], ) def test_bigtable_table_admin_client_transport_session_collision(transport_name): - creds1 = ga_credentials.AnonymousCredentials() - creds2 = ga_credentials.AnonymousCredentials() + creds1 = _AnonymousCredentialsWithUniverseDomain() + creds2 = _AnonymousCredentialsWithUniverseDomain() client1 = BigtableTableAdminClient( 
credentials=creds1, transport=transport_name, @@ -15163,7 +15601,7 @@ def test_bigtable_table_admin_transport_channel_mtls_with_client_cert_source( mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel - cred = ga_credentials.AnonymousCredentials() + cred = _AnonymousCredentialsWithUniverseDomain() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = (cred, None) @@ -15241,7 +15679,7 @@ def test_bigtable_table_admin_transport_channel_mtls_with_adc(transport_class): def test_bigtable_table_admin_grpc_lro_client(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) transport = client.transport @@ -15258,7 +15696,7 @@ def test_bigtable_table_admin_grpc_lro_client(): def test_bigtable_table_admin_grpc_lro_async_client(): client = BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc_asyncio", ) transport = client.transport @@ -15552,7 +15990,7 @@ def test_client_with_default_client_info(): transports.BigtableTableAdminTransport, "_prep_wrapped_messages" ) as prep: client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -15562,7 +16000,7 @@ def test_client_with_default_client_info(): ) as prep: transport_class = BigtableTableAdminClient.get_transport_class() transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -15571,7 +16009,7 @@ def test_client_with_default_client_info(): @pytest.mark.asyncio async def test_transport_close_async(): client = 
BigtableTableAdminAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc_asyncio", ) with mock.patch.object( @@ -15590,7 +16028,7 @@ def test_transport_close(): for transport, close_name in transports.items(): client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" @@ -15607,7 +16045,7 @@ def test_client_ctx(): ] for transport in transports: client = BigtableTableAdminClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport ) # Test client calls underlying transport. with mock.patch.object(type(client.transport), "close") as close: @@ -15641,7 +16079,9 @@ def test_api_key_credentials(client_class, transport_class): patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, diff --git a/tests/unit/gapic/bigtable_v2/test_bigtable.py b/tests/unit/gapic/bigtable_v2/test_bigtable.py index 2319306d7..03c520837 100644 --- a/tests/unit/gapic/bigtable_v2/test_bigtable.py +++ b/tests/unit/gapic/bigtable_v2/test_bigtable.py @@ -29,6 +29,7 @@ import json import math import pytest +from google.api_core import api_core_version from proto.marshal.rules.dates import DurationRule, TimestampRule from proto.marshal.rules import wrappers from requests import Response @@ -71,6 +72,29 @@ def modify_default_endpoint(client): ) +# If default endpoint template is localhost, then default mtls endpoint will be the same. 
+# This method modifies the default endpoint template so the client can produce a different +# mtls endpoint for endpoint testing purposes. +def modify_default_endpoint_template(client): + return ( + "test.{UNIVERSE_DOMAIN}" + if ("localhost" in client._DEFAULT_ENDPOINT_TEMPLATE) + else client._DEFAULT_ENDPOINT_TEMPLATE + ) + + +# Anonymous Credentials with universe domain property. If no universe domain is provided, then +# the default universe domain is "googleapis.com". +class _AnonymousCredentialsWithUniverseDomain(ga_credentials.AnonymousCredentials): + def __init__(self, universe_domain="googleapis.com"): + super(_AnonymousCredentialsWithUniverseDomain, self).__init__() + self._universe_domain = universe_domain + + @property + def universe_domain(self): + return self._universe_domain + + def test__get_default_mtls_endpoint(): api_endpoint = "example.googleapis.com" api_mtls_endpoint = "example.mtls.googleapis.com" @@ -95,6 +119,244 @@ def test__get_default_mtls_endpoint(): assert BigtableClient._get_default_mtls_endpoint(non_googleapi) == non_googleapi +def test__read_environment_variables(): + assert BigtableClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + assert BigtableClient._read_environment_variables() == (True, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}): + assert BigtableClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + BigtableClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + assert BigtableClient._read_environment_variables() == (False, 
"never", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + assert BigtableClient._read_environment_variables() == (False, "always", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}): + assert BigtableClient._read_environment_variables() == (False, "auto", None) + + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableClient._read_environment_variables() + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + with mock.patch.dict(os.environ, {"GOOGLE_CLOUD_UNIVERSE_DOMAIN": "foo.com"}): + assert BigtableClient._read_environment_variables() == ( + False, + "auto", + "foo.com", + ) + + +def test__get_client_cert_source(): + mock_provided_cert_source = mock.Mock() + mock_default_cert_source = mock.Mock() + + assert BigtableClient._get_client_cert_source(None, False) is None + assert ( + BigtableClient._get_client_cert_source(mock_provided_cert_source, False) is None + ) + assert ( + BigtableClient._get_client_cert_source(mock_provided_cert_source, True) + == mock_provided_cert_source + ) + + with mock.patch( + "google.auth.transport.mtls.has_default_client_cert_source", return_value=True + ): + with mock.patch( + "google.auth.transport.mtls.default_client_cert_source", + return_value=mock_default_cert_source, + ): + assert ( + BigtableClient._get_client_cert_source(None, True) + is mock_default_cert_source + ) + assert ( + BigtableClient._get_client_cert_source( + mock_provided_cert_source, "true" + ) + is mock_provided_cert_source + ) + + +@mock.patch.object( + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), +) +@mock.patch.object( + BigtableAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), +) +def 
test__get_api_endpoint(): + api_override = "foo.com" + mock_client_cert_source = mock.Mock() + default_universe = BigtableClient._DEFAULT_UNIVERSE + default_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + assert ( + BigtableClient._get_api_endpoint( + api_override, mock_client_cert_source, default_universe, "always" + ) + == api_override + ) + assert ( + BigtableClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "auto" + ) + == BigtableClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableClient._get_api_endpoint(None, None, default_universe, "auto") + == default_endpoint + ) + assert ( + BigtableClient._get_api_endpoint(None, None, default_universe, "always") + == BigtableClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableClient._get_api_endpoint( + None, mock_client_cert_source, default_universe, "always" + ) + == BigtableClient.DEFAULT_MTLS_ENDPOINT + ) + assert ( + BigtableClient._get_api_endpoint(None, None, mock_universe, "never") + == mock_endpoint + ) + assert ( + BigtableClient._get_api_endpoint(None, None, default_universe, "never") + == default_endpoint + ) + + with pytest.raises(MutualTLSChannelError) as excinfo: + BigtableClient._get_api_endpoint( + None, mock_client_cert_source, mock_universe, "auto" + ) + assert ( + str(excinfo.value) + == "mTLS is not supported in any universe other than googleapis.com." 
+ ) + + +def test__get_universe_domain(): + client_universe_domain = "foo.com" + universe_domain_env = "bar.com" + + assert ( + BigtableClient._get_universe_domain(client_universe_domain, universe_domain_env) + == client_universe_domain + ) + assert ( + BigtableClient._get_universe_domain(None, universe_domain_env) + == universe_domain_env + ) + assert ( + BigtableClient._get_universe_domain(None, None) + == BigtableClient._DEFAULT_UNIVERSE + ) + + with pytest.raises(ValueError) as excinfo: + BigtableClient._get_universe_domain("", None) + assert str(excinfo.value) == "Universe Domain cannot be an empty string." + + +@pytest.mark.parametrize( + "client_class,transport_class,transport_name", + [ + (BigtableClient, transports.BigtableGrpcTransport, "grpc"), + (BigtableClient, transports.BigtableRestTransport, "rest"), + ], +) +def test__validate_universe_domain(client_class, transport_class, transport_name): + client = client_class( + transport=transport_class(credentials=_AnonymousCredentialsWithUniverseDomain()) + ) + assert client._validate_universe_domain() == True + + # Test the case when universe is already validated. + assert client._validate_universe_domain() == True + + if transport_name == "grpc": + # Test the case where credentials are provided by the + # `local_channel_credentials`. The default universes in both match. + channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + client = client_class(transport=transport_class(channel=channel)) + assert client._validate_universe_domain() == True + + # Test the case where credentials do not exist: e.g. a transport is provided + # with no credentials. Validation should still succeed because there is no + # mismatch with non-existent credentials. 
+ channel = grpc.secure_channel( + "http://localhost/", grpc.local_channel_credentials() + ) + transport = transport_class(channel=channel) + transport._credentials = None + client = client_class(transport=transport) + assert client._validate_universe_domain() == True + + # Test the case when there is a universe mismatch from the credentials. + client = client_class( + transport=transport_class( + credentials=_AnonymousCredentialsWithUniverseDomain( + universe_domain="foo.com" + ) + ) + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (googleapis.com) does not match the universe domain found in the credentials (foo.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." + ) + + # Test the case when there is a universe mismatch from the client. + # + # TODO: Make this test unconditional once the minimum supported version of + # google-api-core becomes 2.15.0 or higher. + api_core_major, api_core_minor, _ = [ + int(part) for part in api_core_version.__version__.split(".") + ] + if api_core_major > 2 or (api_core_major == 2 and api_core_minor >= 15): + client = client_class( + client_options={"universe_domain": "bar.com"}, + transport=transport_class( + credentials=_AnonymousCredentialsWithUniverseDomain(), + ), + ) + with pytest.raises(ValueError) as excinfo: + client._validate_universe_domain() + assert ( + str(excinfo.value) + == "The configured universe domain (bar.com) does not match the universe domain found in the credentials (googleapis.com). If you haven't configured the universe domain explicitly, `googleapis.com` is the default." 
+ ) + + @pytest.mark.parametrize( "client_class,transport_name", [ @@ -104,7 +366,7 @@ def test__get_default_mtls_endpoint(): ], ) def test_bigtable_client_from_service_account_info(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() with mock.patch.object( service_account.Credentials, "from_service_account_info" ) as factory: @@ -156,7 +418,7 @@ def test_bigtable_client_service_account_always_use_jwt( ], ) def test_bigtable_client_from_service_account_file(client_class, transport_name): - creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() with mock.patch.object( service_account.Credentials, "from_service_account_file" ) as factory: @@ -201,17 +463,21 @@ def test_bigtable_client_get_transport_class(): ], ) @mock.patch.object( - BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), ) @mock.patch.object( BigtableAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), ) def test_bigtable_client_client_options(client_class, transport_class, transport_name): # Check that if channel is provided we won't create a new one. 
with mock.patch.object(BigtableClient, "get_transport_class") as gtc: - transport = transport_class(credentials=ga_credentials.AnonymousCredentials()) + transport = transport_class( + credentials=_AnonymousCredentialsWithUniverseDomain() + ) client = client_class(transport=transport) gtc.assert_not_called() @@ -246,7 +512,9 @@ def test_bigtable_client_client_options(client_class, transport_class, transport patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -276,15 +544,23 @@ def test_bigtable_client_client_options(client_class, transport_class, transport # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has # unsupported value. with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): - with pytest.raises(MutualTLSChannelError): + with pytest.raises(MutualTLSChannelError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
with mock.patch.dict( os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} ): - with pytest.raises(ValueError): + with pytest.raises(ValueError) as excinfo: client = client_class(transport=transport_name) + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) # Check the case quota_project_id is provided options = client_options.ClientOptions(quota_project_id="octopus") @@ -294,7 +570,9 @@ def test_bigtable_client_client_options(client_class, transport_class, transport patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id="octopus", @@ -312,7 +590,9 @@ def test_bigtable_client_client_options(client_class, transport_class, transport patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -344,12 +624,14 @@ def test_bigtable_client_client_options(client_class, transport_class, transport ], ) @mock.patch.object( - BigtableClient, "DEFAULT_ENDPOINT", modify_default_endpoint(BigtableClient) + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), ) @mock.patch.object( BigtableAsyncClient, - "DEFAULT_ENDPOINT", - modify_default_endpoint(BigtableAsyncClient), + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), ) @mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"}) def test_bigtable_client_mtls_env_auto( @@ -372,7 +654,9 @@ def test_bigtable_client_mtls_env_auto( if use_client_cert_env == "false": expected_client_cert_source = None - expected_host = 
client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) else: expected_client_cert_source = client_cert_source_callback expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -404,7 +688,9 @@ def test_bigtable_client_mtls_env_auto( return_value=client_cert_source_callback, ): if use_client_cert_env == "false": - expected_host = client.DEFAULT_ENDPOINT + expected_host = client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ) expected_client_cert_source = None else: expected_host = client.DEFAULT_MTLS_ENDPOINT @@ -438,7 +724,9 @@ def test_bigtable_client_mtls_env_auto( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -524,6 +812,116 @@ def test_bigtable_client_get_mtls_endpoint_and_cert_source(client_class): assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT assert cert_source == mock_client_cert_source + # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has + # unsupported value. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}): + with pytest.raises(MutualTLSChannelError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_MTLS_ENDPOINT` must be `never`, `auto` or `always`" + ) + + # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value. 
+ with mock.patch.dict( + os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"} + ): + with pytest.raises(ValueError) as excinfo: + client_class.get_mtls_endpoint_and_cert_source() + + assert ( + str(excinfo.value) + == "Environment variable `GOOGLE_API_USE_CLIENT_CERTIFICATE` must be either `true` or `false`" + ) + + +@pytest.mark.parametrize("client_class", [BigtableClient, BigtableAsyncClient]) +@mock.patch.object( + BigtableClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableClient), +) +@mock.patch.object( + BigtableAsyncClient, + "_DEFAULT_ENDPOINT_TEMPLATE", + modify_default_endpoint_template(BigtableAsyncClient), +) +def test_bigtable_client_client_api_endpoint(client_class): + mock_client_cert_source = client_cert_source_callback + api_override = "foo.com" + default_universe = BigtableClient._DEFAULT_UNIVERSE + default_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=default_universe + ) + mock_universe = "bar.com" + mock_endpoint = BigtableClient._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=mock_universe + ) + + # If ClientOptions.api_endpoint is set and GOOGLE_API_USE_CLIENT_CERTIFICATE="true", + # use ClientOptions.api_endpoint as the api endpoint regardless. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}): + with mock.patch( + "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" + ): + options = client_options.ClientOptions( + client_cert_source=mock_client_cert_source, api_endpoint=api_override + ) + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + assert client.api_endpoint == api_override + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) + assert client.api_endpoint == default_endpoint + + # If ClientOptions.api_endpoint is not set and GOOGLE_API_USE_MTLS_ENDPOINT="always", + # use the DEFAULT_MTLS_ENDPOINT as the api endpoint. + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}): + client = client_class(credentials=_AnonymousCredentialsWithUniverseDomain()) + assert client.api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT + + # If ClientOptions.api_endpoint is not set, GOOGLE_API_USE_MTLS_ENDPOINT="auto" (default), + # GOOGLE_API_USE_CLIENT_CERTIFICATE="false" (default), default cert source doesn't exist, + # and ClientOptions.universe_domain="bar.com", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with universe domain as the api endpoint. + options = client_options.ClientOptions() + universe_exists = hasattr(options, "universe_domain") + if universe_exists: + options = client_options.ClientOptions(universe_domain=mock_universe) + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + else: + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + assert client.api_endpoint == ( + mock_endpoint if universe_exists else default_endpoint + ) + assert client.universe_domain == ( + mock_universe if universe_exists else default_universe + ) + + # If ClientOptions does not have a universe domain attribute and GOOGLE_API_USE_MTLS_ENDPOINT="never", + # use the _DEFAULT_ENDPOINT_TEMPLATE populated with GDU as the api endpoint. 
+ options = client_options.ClientOptions() + if hasattr(options, "universe_domain"): + delattr(options, "universe_domain") + with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}): + client = client_class( + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), + ) + assert client.api_endpoint == default_endpoint + @pytest.mark.parametrize( "client_class,transport_class,transport_name", @@ -546,7 +944,9 @@ def test_bigtable_client_client_options_scopes( patched.assert_called_once_with( credentials=None, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=["1", "2"], client_cert_source_for_mtls=None, quota_project_id=None, @@ -581,7 +981,9 @@ def test_bigtable_client_client_options_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -634,7 +1036,9 @@ def test_bigtable_client_create_channel_credentials_file( patched.assert_called_once_with( credentials=None, credentials_file="credentials.json", - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None, @@ -651,8 +1055,8 @@ def test_bigtable_client_create_channel_credentials_file( ) as adc, mock.patch.object( grpc_helpers, "create_channel" ) as create_channel: - creds = ga_credentials.AnonymousCredentials() - file_creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() + file_creds = _AnonymousCredentialsWithUniverseDomain() load_creds.return_value = (file_creds, None) adc.return_value = (creds, None) client = 
client_class(client_options=options, transport=transport_name) @@ -688,7 +1092,7 @@ def test_bigtable_client_create_channel_credentials_file( ) def test_read_rows(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -716,7 +1120,7 @@ def test_read_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -733,7 +1137,7 @@ async def test_read_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadRowsRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -767,7 +1171,7 @@ async def test_read_rows_async_from_dict(): def test_read_rows_routing_parameters(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -810,7 +1214,7 @@ def test_read_rows_routing_parameters(): def test_read_rows_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -838,7 +1242,7 @@ def test_read_rows_flattened(): def test_read_rows_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -854,7 +1258,7 @@ def test_read_rows_flattened_error(): @pytest.mark.asyncio async def test_read_rows_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -885,7 +1289,7 @@ async def test_read_rows_flattened_async(): @pytest.mark.asyncio async def test_read_rows_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -907,7 +1311,7 @@ async def test_read_rows_flattened_error_async(): ) def test_sample_row_keys(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -935,7 +1339,7 @@ def test_sample_row_keys_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -952,7 +1356,7 @@ async def test_sample_row_keys_async( transport: str = "grpc_asyncio", request_type=bigtable.SampleRowKeysRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -986,7 +1390,7 @@ async def test_sample_row_keys_async_from_dict(): def test_sample_row_keys_routing_parameters(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1029,7 +1433,7 @@ def test_sample_row_keys_routing_parameters(): def test_sample_row_keys_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1057,7 +1461,7 @@ def test_sample_row_keys_flattened(): def test_sample_row_keys_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1073,7 +1477,7 @@ def test_sample_row_keys_flattened_error(): @pytest.mark.asyncio async def test_sample_row_keys_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1104,7 +1508,7 @@ async def test_sample_row_keys_flattened_async(): @pytest.mark.asyncio async def test_sample_row_keys_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1126,7 +1530,7 @@ async def test_sample_row_keys_flattened_error_async(): ) def test_mutate_row(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1153,7 +1557,7 @@ def test_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1170,7 +1574,7 @@ async def test_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1202,7 +1606,7 @@ async def test_mutate_row_async_from_dict(): def test_mutate_row_routing_parameters(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1245,7 +1649,7 @@ def test_mutate_row_routing_parameters(): def test_mutate_row_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1289,7 +1693,7 @@ def test_mutate_row_flattened(): def test_mutate_row_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1311,7 +1715,7 @@ def test_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_mutate_row_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1360,7 +1764,7 @@ async def test_mutate_row_flattened_async(): @pytest.mark.asyncio async def test_mutate_row_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1388,7 +1792,7 @@ async def test_mutate_row_flattened_error_async(): ) def test_mutate_rows(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1416,7 +1820,7 @@ def test_mutate_rows_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1433,7 +1837,7 @@ async def test_mutate_rows_async( transport: str = "grpc_asyncio", request_type=bigtable.MutateRowsRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1467,7 +1871,7 @@ async def test_mutate_rows_async_from_dict(): def test_mutate_rows_routing_parameters(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1510,7 +1914,7 @@ def test_mutate_rows_routing_parameters(): def test_mutate_rows_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1542,7 +1946,7 @@ def test_mutate_rows_flattened(): def test_mutate_rows_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1559,7 +1963,7 @@ def test_mutate_rows_flattened_error(): @pytest.mark.asyncio async def test_mutate_rows_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1594,7 +1998,7 @@ async def test_mutate_rows_flattened_async(): @pytest.mark.asyncio async def test_mutate_rows_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1617,7 +2021,7 @@ async def test_mutate_rows_flattened_error_async(): ) def test_check_and_mutate_row(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1649,7 +2053,7 @@ def test_check_and_mutate_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -1668,7 +2072,7 @@ async def test_check_and_mutate_row_async( transport: str = "grpc_asyncio", request_type=bigtable.CheckAndMutateRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -1705,7 +2109,7 @@ async def test_check_and_mutate_row_async_from_dict(): def test_check_and_mutate_row_routing_parameters(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -1752,7 +2156,7 @@ def test_check_and_mutate_row_routing_parameters(): def test_check_and_mutate_row_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -1832,7 +2236,7 @@ def test_check_and_mutate_row_flattened(): def test_check_and_mutate_row_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1870,7 +2274,7 @@ def test_check_and_mutate_row_flattened_error(): @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -1955,7 +2359,7 @@ async def test_check_and_mutate_row_flattened_async(): @pytest.mark.asyncio async def test_check_and_mutate_row_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -1999,7 +2403,7 @@ async def test_check_and_mutate_row_flattened_error_async(): ) def test_ping_and_warm(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2026,7 +2430,7 @@ def test_ping_and_warm_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2043,7 +2447,7 @@ async def test_ping_and_warm_async( transport: str = "grpc_asyncio", request_type=bigtable.PingAndWarmRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2075,7 +2479,7 @@ async def test_ping_and_warm_async_from_dict(): def test_ping_and_warm_routing_parameters(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2118,7 +2522,7 @@ def test_ping_and_warm_routing_parameters(): def test_ping_and_warm_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2146,7 +2550,7 @@ def test_ping_and_warm_flattened(): def test_ping_and_warm_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2162,7 +2566,7 @@ def test_ping_and_warm_flattened_error(): @pytest.mark.asyncio async def test_ping_and_warm_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2195,7 +2599,7 @@ async def test_ping_and_warm_flattened_async(): @pytest.mark.asyncio async def test_ping_and_warm_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2217,7 +2621,7 @@ async def test_ping_and_warm_flattened_error_async(): ) def test_read_modify_write_row(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2246,7 +2650,7 @@ def test_read_modify_write_row_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2265,7 +2669,7 @@ async def test_read_modify_write_row_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadModifyWriteRowRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2299,7 +2703,7 @@ async def test_read_modify_write_row_async_from_dict(): def test_read_modify_write_row_routing_parameters(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2346,7 +2750,7 @@ def test_read_modify_write_row_routing_parameters(): def test_read_modify_write_row_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2384,7 +2788,7 @@ def test_read_modify_write_row_flattened(): def test_read_modify_write_row_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2402,7 +2806,7 @@ def test_read_modify_write_row_flattened_error(): @pytest.mark.asyncio async def test_read_modify_write_row_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2445,7 +2849,7 @@ async def test_read_modify_write_row_flattened_async(): @pytest.mark.asyncio async def test_read_modify_write_row_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2471,7 +2875,7 @@ def test_generate_initial_change_stream_partitions( request_type, transport: str = "grpc" ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2505,7 +2909,7 @@ def test_generate_initial_change_stream_partitions_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2525,7 +2929,7 @@ async def test_generate_initial_change_stream_partitions_async( request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2561,7 +2965,7 @@ async def test_generate_initial_change_stream_partitions_async_from_dict(): def test_generate_initial_change_stream_partitions_field_headers(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2595,7 +2999,7 @@ def test_generate_initial_change_stream_partitions_field_headers(): @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_field_headers_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2629,7 +3033,7 @@ async def test_generate_initial_change_stream_partitions_field_headers_async(): def test_generate_initial_change_stream_partitions_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2661,7 +3065,7 @@ def test_generate_initial_change_stream_partitions_flattened(): def test_generate_initial_change_stream_partitions_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2677,7 +3081,7 @@ def test_generate_initial_change_stream_partitions_flattened_error(): @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2712,7 +3116,7 @@ async def test_generate_initial_change_stream_partitions_flattened_async(): @pytest.mark.asyncio async def test_generate_initial_change_stream_partitions_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2734,7 +3138,7 @@ async def test_generate_initial_change_stream_partitions_flattened_error_async() ) def test_read_change_stream(request_type, transport: str = "grpc"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2764,7 +3168,7 @@ def test_read_change_stream_empty_call(): # This test is a coverage failsafe to make sure that totally empty calls, # i.e. request == None and no flattened fields passed, work. 
client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc", ) @@ -2783,7 +3187,7 @@ async def test_read_change_stream_async( transport: str = "grpc_asyncio", request_type=bigtable.ReadChangeStreamRequest ): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -2819,7 +3223,7 @@ async def test_read_change_stream_async_from_dict(): def test_read_change_stream_field_headers(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2851,7 +3255,7 @@ def test_read_change_stream_field_headers(): @pytest.mark.asyncio async def test_read_change_stream_field_headers_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Any value that is part of the HTTP/1.1 URI should be sent as @@ -2885,7 +3289,7 @@ async def test_read_change_stream_field_headers_async(): def test_read_change_stream_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. 
@@ -2915,7 +3319,7 @@ def test_read_change_stream_flattened(): def test_read_change_stream_flattened_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2931,7 +3335,7 @@ def test_read_change_stream_flattened_error(): @pytest.mark.asyncio async def test_read_change_stream_flattened_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Mock the actual call within the gRPC stub, and fake the request. @@ -2964,7 +3368,7 @@ async def test_read_change_stream_flattened_async(): @pytest.mark.asyncio async def test_read_change_stream_flattened_error_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Attempting to call a method with both a request object and flattened @@ -2986,7 +3390,7 @@ async def test_read_change_stream_flattened_error_async(): ) def test_read_rows_rest(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -3042,7 +3446,7 @@ def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).read_rows._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -3051,7 +3455,7 @@ def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): jsonified_request["tableName"] = "table_name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() 
).read_rows._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -3060,7 +3464,7 @@ def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): assert jsonified_request["tableName"] == "table_name_value" client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -3106,7 +3510,7 @@ def test_read_rows_rest_required_fields(request_type=bigtable.ReadRowsRequest): def test_read_rows_rest_unset_required_fields(): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.read_rows._get_unset_required_fields({}) @@ -3116,7 +3520,7 @@ def test_read_rows_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_read_rows_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) @@ -3171,7 +3575,7 @@ def test_read_rows_rest_bad_request( transport: str = "rest", request_type=bigtable.ReadRowsRequest ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3193,7 +3597,7 @@ def test_read_rows_rest_bad_request( def test_read_rows_rest_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -3241,7 +3645,7 @@ def test_read_rows_rest_flattened(): def test_read_rows_rest_flattened_error(transport: str = "rest"): client = BigtableClient( - 
credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3257,7 +3661,7 @@ def test_read_rows_rest_flattened_error(transport: str = "rest"): def test_read_rows_rest_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -3270,7 +3674,7 @@ def test_read_rows_rest_error(): ) def test_sample_row_keys_rest(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -3330,7 +3734,7 @@ def test_sample_row_keys_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).sample_row_keys._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -3339,7 +3743,7 @@ def test_sample_row_keys_rest_required_fields( jsonified_request["tableName"] = "table_name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).sample_row_keys._get_unset_required_fields(jsonified_request) # Check that path parameters and body parameters are not mixing in. 
assert not set(unset_fields) - set(("app_profile_id",)) @@ -3350,7 +3754,7 @@ def test_sample_row_keys_rest_required_fields( assert jsonified_request["tableName"] == "table_name_value" client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -3395,7 +3799,7 @@ def test_sample_row_keys_rest_required_fields( def test_sample_row_keys_rest_unset_required_fields(): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.sample_row_keys._get_unset_required_fields({}) @@ -3405,7 +3809,7 @@ def test_sample_row_keys_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_sample_row_keys_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) @@ -3460,7 +3864,7 @@ def test_sample_row_keys_rest_bad_request( transport: str = "rest", request_type=bigtable.SampleRowKeysRequest ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3482,7 +3886,7 @@ def test_sample_row_keys_rest_bad_request( def test_sample_row_keys_rest_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -3530,7 +3934,7 @@ def test_sample_row_keys_rest_flattened(): def test_sample_row_keys_rest_flattened_error(transport: str = "rest"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3546,7 +3950,7 @@ def test_sample_row_keys_rest_flattened_error(transport: str = "rest"): def test_sample_row_keys_rest_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -3559,7 +3963,7 @@ def test_sample_row_keys_rest_error(): ) def test_mutate_row_rest(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -3606,7 +4010,7 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).mutate_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -3616,7 +4020,7 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) jsonified_request["rowKey"] = b"row_key_blob" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).mutate_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -3627,7 +4031,7 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) assert jsonified_request["rowKey"] == b"row_key_blob" client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -3670,7 +4074,7 @@ def test_mutate_row_rest_required_fields(request_type=bigtable.MutateRowRequest) def test_mutate_row_rest_unset_required_fields(): transport = transports.BigtableRestTransport( - 
credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.mutate_row._get_unset_required_fields({}) @@ -3689,7 +4093,7 @@ def test_mutate_row_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_mutate_row_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) @@ -3743,7 +4147,7 @@ def test_mutate_row_rest_bad_request( transport: str = "rest", request_type=bigtable.MutateRowRequest ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3765,7 +4169,7 @@ def test_mutate_row_rest_bad_request( def test_mutate_row_rest_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -3816,7 +4220,7 @@ def test_mutate_row_rest_flattened(): def test_mutate_row_rest_flattened_error(transport: str = "rest"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -3838,7 +4242,7 @@ def test_mutate_row_rest_flattened_error(transport: str = "rest"): def test_mutate_row_rest_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -3851,7 +4255,7 @@ def test_mutate_row_rest_error(): ) def test_mutate_rows_rest(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), 
transport="rest", ) @@ -3904,7 +4308,7 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).mutate_rows._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -3913,7 +4317,7 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques jsonified_request["tableName"] = "table_name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).mutate_rows._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -3922,7 +4326,7 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques assert jsonified_request["tableName"] == "table_name_value" client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -3968,7 +4372,7 @@ def test_mutate_rows_rest_required_fields(request_type=bigtable.MutateRowsReques def test_mutate_rows_rest_unset_required_fields(): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.mutate_rows._get_unset_required_fields({}) @@ -3986,7 +4390,7 @@ def test_mutate_rows_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_mutate_rows_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = 
BigtableClient(transport=transport) @@ -4041,7 +4445,7 @@ def test_mutate_rows_rest_bad_request( transport: str = "rest", request_type=bigtable.MutateRowsRequest ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4063,7 +4467,7 @@ def test_mutate_rows_rest_bad_request( def test_mutate_rows_rest_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -4112,7 +4516,7 @@ def test_mutate_rows_rest_flattened(): def test_mutate_rows_rest_flattened_error(transport: str = "rest"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4129,7 +4533,7 @@ def test_mutate_rows_rest_flattened_error(transport: str = "rest"): def test_mutate_rows_rest_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -4142,7 +4546,7 @@ def test_mutate_rows_rest_error(): ) def test_check_and_mutate_row_rest(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -4194,7 +4598,7 @@ def test_check_and_mutate_row_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).check_and_mutate_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -4204,7 +4608,7 @@ def test_check_and_mutate_row_rest_required_fields( jsonified_request["rowKey"] = b"row_key_blob" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() 
+ credentials=_AnonymousCredentialsWithUniverseDomain() ).check_and_mutate_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -4215,7 +4619,7 @@ def test_check_and_mutate_row_rest_required_fields( assert jsonified_request["rowKey"] == b"row_key_blob" client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -4258,7 +4662,7 @@ def test_check_and_mutate_row_rest_required_fields( def test_check_and_mutate_row_rest_unset_required_fields(): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.check_and_mutate_row._get_unset_required_fields({}) @@ -4276,7 +4680,7 @@ def test_check_and_mutate_row_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_check_and_mutate_row_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) @@ -4332,7 +4736,7 @@ def test_check_and_mutate_row_rest_bad_request( transport: str = "rest", request_type=bigtable.CheckAndMutateRowRequest ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4354,7 +4758,7 @@ def test_check_and_mutate_row_rest_bad_request( def test_check_and_mutate_row_rest_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -4421,7 +4825,7 @@ def test_check_and_mutate_row_rest_flattened(): def 
test_check_and_mutate_row_rest_flattened_error(transport: str = "rest"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4459,7 +4863,7 @@ def test_check_and_mutate_row_rest_flattened_error(transport: str = "rest"): def test_check_and_mutate_row_rest_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -4472,7 +4876,7 @@ def test_check_and_mutate_row_rest_error(): ) def test_ping_and_warm_rest(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -4518,7 +4922,7 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).ping_and_warm._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -4527,7 +4931,7 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq jsonified_request["name"] = "name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).ping_and_warm._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -4536,7 +4940,7 @@ def test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq assert jsonified_request["name"] == "name_value" client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -4579,7 +4983,7 @@ def 
test_ping_and_warm_rest_required_fields(request_type=bigtable.PingAndWarmReq def test_ping_and_warm_rest_unset_required_fields(): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.ping_and_warm._get_unset_required_fields({}) @@ -4589,7 +4993,7 @@ def test_ping_and_warm_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_ping_and_warm_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) @@ -4643,7 +5047,7 @@ def test_ping_and_warm_rest_bad_request( transport: str = "rest", request_type=bigtable.PingAndWarmRequest ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4665,7 +5069,7 @@ def test_ping_and_warm_rest_bad_request( def test_ping_and_warm_rest_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -4706,7 +5110,7 @@ def test_ping_and_warm_rest_flattened(): def test_ping_and_warm_rest_flattened_error(transport: str = "rest"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4722,7 +5126,7 @@ def test_ping_and_warm_rest_flattened_error(transport: str = "rest"): def test_ping_and_warm_rest_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -4735,7 +5139,7 @@ def 
test_ping_and_warm_rest_error(): ) def test_read_modify_write_row_rest(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -4784,7 +5188,7 @@ def test_read_modify_write_row_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).read_modify_write_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -4794,7 +5198,7 @@ def test_read_modify_write_row_rest_required_fields( jsonified_request["rowKey"] = b"row_key_blob" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).read_modify_write_row._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -4805,7 +5209,7 @@ def test_read_modify_write_row_rest_required_fields( assert jsonified_request["rowKey"] == b"row_key_blob" client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -4848,7 +5252,7 @@ def test_read_modify_write_row_rest_required_fields( def test_read_modify_write_row_rest_unset_required_fields(): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.read_modify_write_row._get_unset_required_fields({}) @@ -4867,7 +5271,7 @@ def test_read_modify_write_row_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_read_modify_write_row_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + 
credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) @@ -4923,7 +5327,7 @@ def test_read_modify_write_row_rest_bad_request( transport: str = "rest", request_type=bigtable.ReadModifyWriteRowRequest ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -4945,7 +5349,7 @@ def test_read_modify_write_row_rest_bad_request( def test_read_modify_write_row_rest_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -4992,7 +5396,7 @@ def test_read_modify_write_row_rest_flattened(): def test_read_modify_write_row_rest_flattened_error(transport: str = "rest"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5010,7 +5414,7 @@ def test_read_modify_write_row_rest_flattened_error(transport: str = "rest"): def test_read_modify_write_row_rest_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -5023,7 +5427,7 @@ def test_read_modify_write_row_rest_error(): ) def test_generate_initial_change_stream_partitions_rest(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -5080,7 +5484,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() 
).generate_initial_change_stream_partitions._get_unset_required_fields( jsonified_request ) @@ -5091,7 +5495,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( jsonified_request["tableName"] = "table_name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).generate_initial_change_stream_partitions._get_unset_required_fields( jsonified_request ) @@ -5102,7 +5506,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( assert jsonified_request["tableName"] == "table_name_value" client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -5150,7 +5554,7 @@ def test_generate_initial_change_stream_partitions_rest_required_fields( def test_generate_initial_change_stream_partitions_rest_unset_required_fields(): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = ( @@ -5164,7 +5568,7 @@ def test_generate_initial_change_stream_partitions_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_generate_initial_change_stream_partitions_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) @@ -5226,7 +5630,7 @@ def test_generate_initial_change_stream_partitions_rest_bad_request( request_type=bigtable.GenerateInitialChangeStreamPartitionsRequest, ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), 
transport=transport, ) @@ -5248,7 +5652,7 @@ def test_generate_initial_change_stream_partitions_rest_bad_request( def test_generate_initial_change_stream_partitions_rest_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -5300,7 +5704,7 @@ def test_generate_initial_change_stream_partitions_rest_flattened_error( transport: str = "rest", ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5316,7 +5720,7 @@ def test_generate_initial_change_stream_partitions_rest_flattened_error( def test_generate_initial_change_stream_partitions_rest_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) @@ -5329,7 +5733,7 @@ def test_generate_initial_change_stream_partitions_rest_error(): ) def test_read_change_stream_rest(request_type): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -5384,7 +5788,7 @@ def test_read_change_stream_rest_required_fields( # verify fields with default values are dropped unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).read_change_stream._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -5393,7 +5797,7 @@ def test_read_change_stream_rest_required_fields( jsonified_request["tableName"] = "table_name_value" unset_fields = transport_class( - credentials=ga_credentials.AnonymousCredentials() + credentials=_AnonymousCredentialsWithUniverseDomain() ).read_change_stream._get_unset_required_fields(jsonified_request) jsonified_request.update(unset_fields) @@ -5402,7 +5806,7 @@ 
def test_read_change_stream_rest_required_fields( assert jsonified_request["tableName"] == "table_name_value" client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) request = request_type(**request_init) @@ -5448,7 +5852,7 @@ def test_read_change_stream_rest_required_fields( def test_read_change_stream_rest_unset_required_fields(): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials + credentials=_AnonymousCredentialsWithUniverseDomain ) unset_fields = transport.read_change_stream._get_unset_required_fields({}) @@ -5458,7 +5862,7 @@ def test_read_change_stream_rest_unset_required_fields(): @pytest.mark.parametrize("null_interceptor", [True, False]) def test_read_change_stream_rest_interceptors(null_interceptor): transport = transports.BigtableRestTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), interceptor=None if null_interceptor else transports.BigtableRestInterceptor(), ) client = BigtableClient(transport=transport) @@ -5515,7 +5919,7 @@ def test_read_change_stream_rest_bad_request( transport: str = "rest", request_type=bigtable.ReadChangeStreamRequest ): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) @@ -5537,7 +5941,7 @@ def test_read_change_stream_rest_bad_request( def test_read_change_stream_rest_flattened(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest", ) @@ -5585,7 +5989,7 @@ def test_read_change_stream_rest_flattened(): def test_read_change_stream_rest_flattened_error(transport: str = "rest"): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), 
transport=transport, ) @@ -5601,24 +6005,24 @@ def test_read_change_stream_rest_flattened_error(transport: str = "rest"): def test_read_change_stream_rest_error(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport="rest" + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="rest" ) def test_credentials_transport_error(): # It is an error to provide credentials and a transport instance. transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) with pytest.raises(ValueError): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport, ) # It is an error to provide a credentials file and a transport instance. transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) with pytest.raises(ValueError): client = BigtableClient( @@ -5628,7 +6032,7 @@ def test_credentials_transport_error(): # It is an error to provide an api_key and a transport instance. transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) options = client_options.ClientOptions() options.api_key = "api_key" @@ -5639,16 +6043,17 @@ def test_credentials_transport_error(): ) # It is an error to provide an api_key and a credential. - options = mock.Mock() + options = client_options.ClientOptions() options.api_key = "api_key" with pytest.raises(ValueError): client = BigtableClient( - client_options=options, credentials=ga_credentials.AnonymousCredentials() + client_options=options, + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # It is an error to provide scopes and a transport instance. 
transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) with pytest.raises(ValueError): client = BigtableClient( @@ -5660,7 +6065,7 @@ def test_credentials_transport_error(): def test_transport_instance(): # A client may be instantiated with a custom transport instance. transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) client = BigtableClient(transport=transport) assert client.transport is transport @@ -5669,13 +6074,13 @@ def test_transport_instance(): def test_transport_get_channel(): # A client may be instantiated with a custom transport instance. transport = transports.BigtableGrpcTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) channel = transport.grpc_channel assert channel transport = transports.BigtableGrpcAsyncIOTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) channel = transport.grpc_channel assert channel @@ -5692,7 +6097,7 @@ def test_transport_get_channel(): def test_transport_adc(transport_class): # Test default credentials are used if not provided. 
with mock.patch.object(google.auth, "default") as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport_class() adc.assert_called_once() @@ -5706,7 +6111,7 @@ def test_transport_adc(transport_class): ) def test_transport_kind(transport_name): transport = BigtableClient.get_transport_class(transport_name)( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) assert transport.kind == transport_name @@ -5714,7 +6119,7 @@ def test_transport_kind(transport_name): def test_transport_grpc_default(): # A client should use the gRPC transport by default. client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) assert isinstance( client.transport, @@ -5726,7 +6131,7 @@ def test_bigtable_base_transport_error(): # Passing both a credentials object and credentials_file should raise an error with pytest.raises(core_exceptions.DuplicateCredentialArgs): transport = transports.BigtableTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), credentials_file="credentials.json", ) @@ -5738,7 +6143,7 @@ def test_bigtable_base_transport(): ) as Transport: Transport.return_value = None transport = transports.BigtableTransport( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), ) # Every method on the transport should just blindly @@ -5778,7 +6183,7 @@ def test_bigtable_base_transport_with_credentials_file(): "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - load_creds.return_value = (ga_credentials.AnonymousCredentials(), None) + load_creds.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport = 
transports.BigtableTransport( credentials_file="credentials.json", quota_project_id="octopus", @@ -5804,7 +6209,7 @@ def test_bigtable_base_transport_with_adc(): "google.cloud.bigtable_v2.services.bigtable.transports.BigtableTransport._prep_wrapped_messages" ) as Transport: Transport.return_value = None - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport = transports.BigtableTransport() adc.assert_called_once() @@ -5812,7 +6217,7 @@ def test_bigtable_base_transport_with_adc(): def test_bigtable_auth_adc(): # If no credentials are provided, we should use ADC credentials. with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) BigtableClient() adc.assert_called_once_with( scopes=None, @@ -5839,7 +6244,7 @@ def test_bigtable_transport_auth_adc(transport_class): # If credentials and host are not provided, the transport class should use # ADC credentials. 
with mock.patch.object(google.auth, "default", autospec=True) as adc: - adc.return_value = (ga_credentials.AnonymousCredentials(), None) + adc.return_value = (_AnonymousCredentialsWithUniverseDomain(), None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) adc.assert_called_once_with( scopes=["1", "2"], @@ -5893,7 +6298,7 @@ def test_bigtable_transport_create_channel(transport_class, grpc_helpers): ) as adc, mock.patch.object( grpc_helpers, "create_channel", autospec=True ) as create_channel: - creds = ga_credentials.AnonymousCredentials() + creds = _AnonymousCredentialsWithUniverseDomain() adc.return_value = (creds, None) transport_class(quota_project_id="octopus", scopes=["1", "2"]) @@ -5925,7 +6330,7 @@ def test_bigtable_transport_create_channel(transport_class, grpc_helpers): [transports.BigtableGrpcTransport, transports.BigtableGrpcAsyncIOTransport], ) def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): - cred = ga_credentials.AnonymousCredentials() + cred = _AnonymousCredentialsWithUniverseDomain() # Check ssl_channel_credentials is used if provided. 
with mock.patch.object(transport_class, "create_channel") as mock_create_channel: @@ -5963,7 +6368,7 @@ def test_bigtable_grpc_transport_client_cert_source_for_mtls(transport_class): def test_bigtable_http_transport_client_cert_source_for_mtls(): - cred = ga_credentials.AnonymousCredentials() + cred = _AnonymousCredentialsWithUniverseDomain() with mock.patch( "google.auth.transport.requests.AuthorizedSession.configure_mtls_channel" ) as mock_configure_mtls_channel: @@ -5983,7 +6388,7 @@ def test_bigtable_http_transport_client_cert_source_for_mtls(): ) def test_bigtable_host_no_port(transport_name): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_options=client_options.ClientOptions( api_endpoint="bigtable.googleapis.com" ), @@ -6006,7 +6411,7 @@ def test_bigtable_host_no_port(transport_name): ) def test_bigtable_host_with_port(transport_name): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_options=client_options.ClientOptions( api_endpoint="bigtable.googleapis.com:8000" ), @@ -6026,8 +6431,8 @@ def test_bigtable_host_with_port(transport_name): ], ) def test_bigtable_client_transport_session_collision(transport_name): - creds1 = ga_credentials.AnonymousCredentials() - creds2 = ga_credentials.AnonymousCredentials() + creds1 = _AnonymousCredentialsWithUniverseDomain() + creds2 = _AnonymousCredentialsWithUniverseDomain() client1 = BigtableClient( credentials=creds1, transport=transport_name, @@ -6110,7 +6515,7 @@ def test_bigtable_transport_channel_mtls_with_client_cert_source(transport_class mock_grpc_channel = mock.Mock() grpc_create_channel.return_value = mock_grpc_channel - cred = ga_credentials.AnonymousCredentials() + cred = _AnonymousCredentialsWithUniverseDomain() with pytest.warns(DeprecationWarning): with mock.patch.object(google.auth, "default") as adc: adc.return_value = 
(cred, None) @@ -6342,7 +6747,7 @@ def test_client_with_default_client_info(): transports.BigtableTransport, "_prep_wrapped_messages" ) as prep: client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -6352,7 +6757,7 @@ def test_client_with_default_client_info(): ) as prep: transport_class = BigtableClient.get_transport_class() transport = transport_class( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), client_info=client_info, ) prep.assert_called_once_with(client_info) @@ -6361,7 +6766,7 @@ def test_client_with_default_client_info(): @pytest.mark.asyncio async def test_transport_close_async(): client = BigtableAsyncClient( - credentials=ga_credentials.AnonymousCredentials(), + credentials=_AnonymousCredentialsWithUniverseDomain(), transport="grpc_asyncio", ) with mock.patch.object( @@ -6380,7 +6785,7 @@ def test_transport_close(): for transport, close_name in transports.items(): client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport ) with mock.patch.object( type(getattr(client.transport, close_name)), "close" @@ -6397,7 +6802,7 @@ def test_client_ctx(): ] for transport in transports: client = BigtableClient( - credentials=ga_credentials.AnonymousCredentials(), transport=transport + credentials=_AnonymousCredentialsWithUniverseDomain(), transport=transport ) # Test client calls underlying transport. 
with mock.patch.object(type(client.transport), "close") as close: @@ -6428,7 +6833,9 @@ def test_api_key_credentials(client_class, transport_class): patched.assert_called_once_with( credentials=mock_cred, credentials_file=None, - host=client.DEFAULT_ENDPOINT, + host=client._DEFAULT_ENDPOINT_TEMPLATE.format( + UNIVERSE_DOMAIN=client._DEFAULT_UNIVERSE + ), scopes=None, client_cert_source_for_mtls=None, quota_project_id=None,