diff --git a/.kokoro/presubmit/conformance.cfg b/.kokoro/presubmit/conformance.cfg
new file mode 100644
index 000000000..4f44e8a78
--- /dev/null
+++ b/.kokoro/presubmit/conformance.cfg
@@ -0,0 +1,6 @@
+# Format: //devtools/kokoro/config/proto/build.proto
+
+env_vars: {
+    key: "NOX_SESSION"
+    value: "conformance"
+}
diff --git a/google/cloud/bigtable/data/_async/_mutate_rows.py b/google/cloud/bigtable/data/_async/_mutate_rows.py
index d4f5bc215..baae205d9 100644
--- a/google/cloud/bigtable/data/_async/_mutate_rows.py
+++ b/google/cloud/bigtable/data/_async/_mutate_rows.py
@@ -15,6 +15,7 @@
 from __future__ import annotations
 
 from typing import TYPE_CHECKING
+import asyncio
 import functools
 
 from google.api_core import exceptions as core_exceptions
@@ -183,6 +184,13 @@ async def _run_attempt(self):
                         self._handle_entry_error(orig_idx, entry_error)
                     # remove processed entry from active list
                     del active_request_indices[result.index]
+        except asyncio.CancelledError:
+            # when the retry wrapper's timeout expires, the operation is cancelled.
+            # make sure incomplete indices are tracked,
+            # but don't record the exception (it will be raised by the wrapper)
+            # TODO: remove asyncio.wait_for in retry wrapper. Let grpc call handle expiration
+            self.remaining_indices.extend(active_request_indices.values())
+            raise
         except Exception as exc:
             # add this exception to list for each mutation that wasn't
             # already handled, and update remaining_indices if mutation is retryable
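The `except asyncio.CancelledError` branch above exists because the retry layer currently wraps each attempt in `asyncio.wait_for`: when the overall timeout fires, the in-flight attempt is cancelled from the outside, and the attempt must move any still-active entries back into `remaining_indices` before re-raising so the final error report accounts for them. A minimal, self-contained sketch of that bookkeeping pattern (illustrative names only, not the library's internals):

```
import asyncio

async def run_attempt(remaining: list[int]) -> None:
    # entries move from the remaining list into an "active" map while in flight
    active = dict(enumerate(remaining))
    remaining.clear()
    try:
        for key in list(active):
            await asyncio.sleep(0.01)  # stand-in for one streamed response
            del active[key]            # entry succeeded
    except asyncio.CancelledError:
        # cancelled mid-stream: requeue unfinished entries, then let the
        # cancellation propagate so the wrapper raises the timeout error
        remaining.extend(active.values())
        raise

async def main() -> None:
    remaining = [0, 1, 2, 3]
    try:
        await asyncio.wait_for(run_attempt(remaining), timeout=0.025)
    except asyncio.TimeoutError:
        print("unfinished entries:", remaining)  # e.g. [2, 3]

asyncio.run(main())
```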
diff --git a/google/cloud/bigtable/data/_async/client.py b/google/cloud/bigtable/data/_async/client.py
index 1ca9e0f7b..8524cd9aa 100644
--- a/google/cloud/bigtable/data/_async/client.py
+++ b/google/cloud/bigtable/data/_async/client.py
@@ -30,6 +30,7 @@
 import warnings
 import sys
 import random
+import os
 
 from collections import namedtuple
 
@@ -38,10 +39,12 @@
 from google.cloud.bigtable_v2.services.bigtable.async_client import DEFAULT_CLIENT_INFO
 from google.cloud.bigtable_v2.services.bigtable.transports.pooled_grpc_asyncio import (
     PooledBigtableGrpcAsyncIOTransport,
+    PooledChannel,
 )
 from google.cloud.bigtable_v2.types.bigtable import PingAndWarmRequest
 from google.cloud.client import ClientWithProject
 from google.api_core.exceptions import GoogleAPICallError
+from google.cloud.environment_vars import BIGTABLE_EMULATOR  # type: ignore
 from google.api_core import retry_async as retries
 from google.api_core import exceptions as core_exceptions
 from google.cloud.bigtable.data._async._read_rows import _ReadRowsOperationAsync
@@ -150,18 +153,35 @@ def __init__(
         # keep track of table objects associated with each instance
         # only remove instance from _active_instances when all associated tables remove it
         self._instance_owners: dict[_WarmedInstanceKey, Set[int]] = {}
-        # attempt to start background tasks
         self._channel_init_time = time.monotonic()
         self._channel_refresh_tasks: list[asyncio.Task[None]] = []
-        try:
-            self._start_background_channel_refresh()
-        except RuntimeError:
+        self._emulator_host = os.getenv(BIGTABLE_EMULATOR)
+        if self._emulator_host is not None:
+            # connect to an emulator host
             warnings.warn(
-                f"{self.__class__.__name__} should be started in an "
-                "asyncio event loop. Channel refresh will not be started",
+                "Connecting to Bigtable emulator at {}".format(self._emulator_host),
                 RuntimeWarning,
                 stacklevel=2,
             )
+            self.transport._grpc_channel = PooledChannel(
+                pool_size=pool_size,
+                host=self._emulator_host,
+                insecure=True,
+            )
+            # refresh cached stubs to use emulator pool
+            self.transport._stubs = {}
+            self.transport._prep_wrapped_messages(client_info)
+        else:
+            # attempt to start background channel refresh tasks
+            try:
+                self._start_background_channel_refresh()
+            except RuntimeError:
+                warnings.warn(
+                    f"{self.__class__.__name__} should be started in an "
+                    "asyncio event loop. Channel refresh will not be started",
+                    RuntimeWarning,
+                    stacklevel=2,
+                )
 
     @staticmethod
     def _client_version() -> str:
@@ -176,7 +196,7 @@ def _start_background_channel_refresh(self) -> None:
         Raises:
           - RuntimeError if not called in an asyncio event loop
         """
-        if not self._channel_refresh_tasks:
+        if not self._channel_refresh_tasks and not self._emulator_host:
             # raise RuntimeError if there is no event loop
             asyncio.get_running_loop()
             for channel_idx in range(self.transport.pool_size):
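With this change, pointing the data client at an emulator is driven entirely by the `BIGTABLE_EMULATOR` environment variable (the constant resolves to `"BIGTABLE_EMULATOR_HOST"`): when it is set, the client builds an insecure `PooledChannel` and skips the background refresh tasks. A usage sketch, assuming an emulator is already listening on the given address, with placeholder project/instance/table ids:

```
import asyncio
import os

from google.cloud.bigtable.data import BigtableDataClientAsync

async def main() -> None:
    # must be set before the client is constructed; 8086 is the default
    # port of the gcloud Bigtable emulator (hypothetical local setup)
    os.environ["BIGTABLE_EMULATOR_HOST"] = "localhost:8086"
    client = BigtableDataClientAsync(project="my-project")  # placeholder
    table = client.get_table("my-instance", "my-table")     # placeholders
    print(await table.read_row(b"some-row-key"))            # served by the emulator

asyncio.run(main())
```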
diff --git a/noxfile.py b/noxfile.py
index 16447778e..4b57e617f 100644
--- a/noxfile.py
+++ b/noxfile.py
@@ -278,6 +278,27 @@ def system_emulated(session):
         os.killpg(os.getpgid(p.pid), signal.SIGKILL)
 
 
+@nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
+def conformance(session):
+    """
+    Run the set of shared bigtable conformance tests
+    """
+    TEST_REPO_URL = "https://github.com/googleapis/cloud-bigtable-clients-test.git"
+    CLONE_REPO_DIR = "cloud-bigtable-clients-test"
+    # install dependencies
+    constraints_path = str(
+        CURRENT_DIRECTORY / "testing" / f"constraints-{session.python}.txt"
+    )
+    install_unittest_dependencies(session, "-c", constraints_path)
+    with session.chdir("test_proxy"):
+        # download the conformance test suite
+        clone_dir = os.path.join(CURRENT_DIRECTORY, CLONE_REPO_DIR)
+        if not os.path.exists(clone_dir):
+            print("downloading copy of test repo")
+            session.run("git", "clone", TEST_REPO_URL, CLONE_REPO_DIR, external=True)
+        session.run("bash", "-e", "run_tests.sh", external=True)
+
+
 @nox.session(python=SYSTEM_TEST_PYTHON_VERSIONS)
 def system(session):
     """Run the system test suite."""
diff --git a/test_proxy/README.md b/test_proxy/README.md
new file mode 100644
index 000000000..08741fd5d
--- /dev/null
+++ b/test_proxy/README.md
@@ -0,0 +1,60 @@
+# CBT Python Test Proxy
+
+The CBT test proxy is intended for running conformance tests for the Cloud Bigtable Python client.
+
+## Option 1: Run Tests with Nox
+
+You can run the conformance tests with a single command by calling `nox -s conformance` from the repo root.
+
+```
+cd python-bigtable
+nox -s conformance
+```
+
+## Option 2: Run processes manually
+
+### Start test proxy
+
+You can use `test_proxy.py` to launch a new test proxy process directly.
+
+```
+cd python-bigtable/test_proxy
+python test_proxy.py
+```
+
+The port can be set with the `--port` flag.
+
+```
+cd python-bigtable/test_proxy
+python test_proxy.py --port 8080
+```
+
+You can run the test proxy against the previous `v2` client by running it with the `--legacy-client` flag:
+
+```
+python test_proxy.py --legacy-client
+```
+
+### Run the test cases
+
+Prerequisites:
+- If you have not already done so, [install golang](https://go.dev/doc/install).
+- Before running tests, [launch an instance of the test proxy](#start-test-proxy)
+in a separate shell session, and make note of the port.
+
+Clone and navigate to the go test library:
+
+```
+git clone https://github.com/googleapis/cloud-bigtable-clients-test.git
+cd cloud-bigtable-clients-test/tests
+```
+
+Launch the tests:
+
+```
+go test -v -proxy_addr=:50055
+```
diff --git a/test_proxy/handlers/client_handler_data.py b/test_proxy/handlers/client_handler_data.py
new file mode 100644
index 000000000..43ff5d634
--- /dev/null
+++ b/test_proxy/handlers/client_handler_data.py
@@ -0,0 +1,214 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This module contains the client handler process for proxy_server.py.
+"""
+import os
+
+from google.cloud.environment_vars import BIGTABLE_EMULATOR
+from google.cloud.bigtable.data import BigtableDataClientAsync
+
+
+def error_safe(func):
+    """
+    Catch and pass errors back to the grpc_server_process
+    Also check if client is closed before processing requests
+    """
+    async def wrapper(self, *args, **kwargs):
+        try:
+            if self.closed:
+                raise RuntimeError("client is closed")
+            return await func(self, *args, **kwargs)
+        except (Exception, NotImplementedError) as e:
+            # exceptions should be raised in grpc_server_process
+            return encode_exception(e)
+
+    return wrapper
+
+
+def encode_exception(exc):
+    """
+    Encode an exception or chain of exceptions to pass back to grpc_handler
+    """
+    from google.api_core.exceptions import GoogleAPICallError
+    error_msg = f"{type(exc).__name__}: {exc}"
+    result = {"error": error_msg}
+    if exc.__cause__:
+        result["cause"] = encode_exception(exc.__cause__)
+    if hasattr(exc, "exceptions"):
+        result["subexceptions"] = [encode_exception(e) for e in exc.exceptions]
+    if hasattr(exc, "index"):
+        result["index"] = exc.index
+    if isinstance(exc, GoogleAPICallError):
+        if exc.grpc_status_code is not None:
+            result["code"] = exc.grpc_status_code.value[0]
+        elif exc.code is not None:
+            result["code"] = int(exc.code)
+        else:
+            result["code"] = -1
+    elif result.get("cause", {}).get("code", None):
+        # look for a code in the cause
+        result["code"] = result["cause"]["code"]
+    elif result.get("subexceptions", None):
+        # look for a code in the subexceptions
+        for subexc in result["subexceptions"]:
+            if subexc.get("code", None):
+                result["code"] = subexc["code"]
+    return result
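`encode_exception` flattens an exception (and its `__cause__` chain, sub-exceptions, and gRPC status code, when present) into nested dicts so the error can cross the process boundary as plain data. A simplified sketch of the shape it produces, keeping only the cause-chasing branch:

```
def encode_exception(exc):
    # illustrative reduction of the function above (no google-cloud imports,
    # so the GoogleAPICallError / subexceptions / index branches are omitted)
    result = {"error": f"{type(exc).__name__}: {exc}"}
    if exc.__cause__:
        result["cause"] = encode_exception(exc.__cause__)
    return result

try:
    try:
        raise ValueError("bad cell value")
    except ValueError as inner:
        raise RuntimeError("mutation failed") from inner
except RuntimeError as outer:
    print(encode_exception(outer))
# {'error': 'RuntimeError: mutation failed',
#  'cause': {'error': 'ValueError: bad cell value'}}
```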
+
+
+class TestProxyClientHandler:
+    """
+    Implements the same methods as the grpc server, but handles the client
+    library side of the request.
+
+    Requests received in TestProxyGrpcServer are converted to a dictionary,
+    and supplied to the TestProxyClientHandler methods as kwargs.
+    The client response is then returned to the TestProxyGrpcServer.
+    """
+
+    def __init__(
+        self,
+        data_target=None,
+        project_id=None,
+        instance_id=None,
+        app_profile_id=None,
+        per_operation_timeout=None,
+        **kwargs,
+    ):
+        self.closed = False
+        # use emulator
+        os.environ[BIGTABLE_EMULATOR] = data_target
+        self.client = BigtableDataClientAsync(project=project_id)
+        self.instance_id = instance_id
+        self.app_profile_id = app_profile_id
+        self.per_operation_timeout = per_operation_timeout
+
+    def close(self):
+        # TODO: call self.client.close()
+        self.closed = True
+
+    @error_safe
+    async def ReadRows(self, request, **kwargs):
+        table_id = request.pop("table_name").split("/")[-1]
+        app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        table = self.client.get_table(self.instance_id, table_id, app_profile_id)
+        kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+        result_list = await table.read_rows(request, **kwargs)
+        # pack results back into protobuf-parsable format
+        serialized_response = [row._to_dict() for row in result_list]
+        return serialized_response
+
+    @error_safe
+    async def ReadRow(self, row_key, **kwargs):
+        table_id = kwargs.pop("table_name").split("/")[-1]
+        app_profile_id = self.app_profile_id or kwargs.get("app_profile_id", None)
+        table = self.client.get_table(self.instance_id, table_id, app_profile_id)
+        kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+        result_row = await table.read_row(row_key, **kwargs)
+        # pack results back into protobuf-parsable format
+        if result_row:
+            return result_row._to_dict()
+        else:
+            return "None"
+
+    @error_safe
+    async def MutateRow(self, request, **kwargs):
+        from google.cloud.bigtable.data.mutations import Mutation
+        table_id = request["table_name"].split("/")[-1]
+        app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        table = self.client.get_table(self.instance_id, table_id, app_profile_id)
+        kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+        row_key = request["row_key"]
+        mutations = [Mutation._from_dict(d) for d in request["mutations"]]
+        await table.mutate_row(row_key, mutations, **kwargs)
+        return "OK"
+
+    @error_safe
+    async def BulkMutateRows(self, request, **kwargs):
+        from google.cloud.bigtable.data.mutations import RowMutationEntry
+        table_id = request["table_name"].split("/")[-1]
+        app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        table = self.client.get_table(self.instance_id, table_id, app_profile_id)
+        kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+        entry_list = [RowMutationEntry._from_dict(entry) for entry in request["entries"]]
+        await table.bulk_mutate_rows(entry_list, **kwargs)
+        return "OK"
+
+    @error_safe
+    async def CheckAndMutateRow(self, request, **kwargs):
+        from google.cloud.bigtable.data.mutations import Mutation, SetCell
+        table_id = request["table_name"].split("/")[-1]
+        app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        table = self.client.get_table(self.instance_id, table_id, app_profile_id)
+        kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+        row_key = request["row_key"]
+        # add default values for incomplete dicts, so they can still be parsed to objects
+        true_mutations = []
+        for mut_dict in request.get("true_mutations", []):
+            try:
+                true_mutations.append(Mutation._from_dict(mut_dict))
+            except ValueError:
+                # invalid mutation type. Conformance test may be sending a generic empty request
+                mutation = SetCell("", "", "", 0)
+                true_mutations.append(mutation)
+        false_mutations = []
+        for mut_dict in request.get("false_mutations", []):
+            try:
+                false_mutations.append(Mutation._from_dict(mut_dict))
+            except ValueError:
+                # invalid mutation type. Conformance test may be sending a generic empty request
+                false_mutations.append(SetCell("", "", "", 0))
+        predicate_filter = request.get("predicate_filter", None)
+        result = await table.check_and_mutate_row(
+            row_key,
+            predicate_filter,
+            true_case_mutations=true_mutations,
+            false_case_mutations=false_mutations,
+            **kwargs,
+        )
+        return result
+
+    @error_safe
+    async def ReadModifyWriteRow(self, request, **kwargs):
+        from google.cloud.bigtable.data.read_modify_write_rules import IncrementRule
+        from google.cloud.bigtable.data.read_modify_write_rules import AppendValueRule
+        table_id = request["table_name"].split("/")[-1]
+        app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        table = self.client.get_table(self.instance_id, table_id, app_profile_id)
+        kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+        row_key = request["row_key"]
+        rules = []
+        for rule_dict in request.get("rules", []):
+            qualifier = rule_dict["column_qualifier"]
+            if "append_value" in rule_dict:
+                new_rule = AppendValueRule(rule_dict["family_name"], qualifier, rule_dict["append_value"])
+            else:
+                new_rule = IncrementRule(rule_dict["family_name"], qualifier, rule_dict["increment_amount"])
+            rules.append(new_rule)
+        result = await table.read_modify_write_row(row_key, rules, **kwargs)
+        # pack results back into protobuf-parsable format
+        if result:
+            return result._to_dict()
+        else:
+            return "None"
+
+    @error_safe
+    async def SampleRowKeys(self, request, **kwargs):
+        table_id = request["table_name"].split("/")[-1]
+        app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        table = self.client.get_table(self.instance_id, table_id, app_profile_id)
+        kwargs["operation_timeout"] = kwargs.get("operation_timeout", self.per_operation_timeout) or 20
+        result = await table.sample_row_keys(**kwargs)
+        return result
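Each handler method above receives the protobuf request already converted to a dict (the gRPC layer uses `json_format.MessageToDict`) and returns plain serializable data, falling back to a 20s operation timeout when none is configured. A hypothetical driver showing the round trip, with placeholder ids and assuming an emulator or other backend is listening locally:

```
import asyncio

from client_handler_data import TestProxyClientHandler  # the module above

async def demo() -> None:
    # assumes a Bigtable emulator is reachable at the hypothetical address below
    handler = TestProxyClientHandler(
        data_target="localhost:8086",  # written to BIGTABLE_EMULATOR
        project_id="my-project",       # placeholder
        instance_id="my-instance",     # placeholder
    )
    request = {"table_name": "projects/p/instances/my-instance/tables/my-table"}
    rows = await handler.ReadRows(request)
    # on success: a list of row dicts; on failure: {"error": ..., "code": ...}
    print(rows)
    handler.close()

asyncio.run(demo())
```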
diff --git a/test_proxy/handlers/client_handler_legacy.py b/test_proxy/handlers/client_handler_legacy.py
new file mode 100644
index 000000000..b423165f1
--- /dev/null
+++ b/test_proxy/handlers/client_handler_legacy.py
@@ -0,0 +1,132 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This module contains a client handler process for proxy_server.py, backed by the legacy v2 client.
+"""
+import os
+
+from google.cloud.environment_vars import BIGTABLE_EMULATOR
+from google.cloud.bigtable.client import Client
+
+import client_handler_data as client_handler
+
+import warnings
+warnings.filterwarnings("ignore", category=DeprecationWarning)
+
+
+class LegacyTestProxyClientHandler(client_handler.TestProxyClientHandler):
+
+    def __init__(
+        self,
+        data_target=None,
+        project_id=None,
+        instance_id=None,
+        app_profile_id=None,
+        per_operation_timeout=None,
+        **kwargs,
+    ):
+        self.closed = False
+        # use emulator
+        os.environ[BIGTABLE_EMULATOR] = data_target
+        self.client = Client(project=project_id)
+        self.instance_id = instance_id
+        self.app_profile_id = app_profile_id
+        self.per_operation_timeout = per_operation_timeout
+
+    async def close(self):
+        self.closed = True
+
+    @client_handler.error_safe
+    async def ReadRows(self, request, **kwargs):
+        table_id = request["table_name"].split("/")[-1]
+        # app_profile_id = self.app_profile_id or request.get("app_profile_id", None)
+        instance = self.client.instance(self.instance_id)
+        table = instance.table(table_id)
+
+        limit = request.get("rows_limit", None)
+        start_key = request.get("rows", {}).get("row_keys", [None])[0]
+        end_key = request.get("rows", {}).get("row_keys", [None])[-1]
+        end_inclusive = request.get("rows", {}).get("row_ranges", [{}])[-1].get("end_key_closed", True)
+
+        row_list = []
+        for row in table.read_rows(start_key=start_key, end_key=end_key, limit=limit, end_inclusive=end_inclusive):
+            # parse results into proto formatted dict
+            dict_val = {"row_key": row.row_key}
+            for family, family_cells in row.cells.items():
+                family_dict = {"name": family}
+                for qualifier, qualifier_cells in family_cells.items():
+                    column_dict = {"qualifier": qualifier}
+                    for cell in qualifier_cells:
+                        cell_dict = {
+                            "value": cell.value,
+                            "timestamp_micros": cell.timestamp.timestamp() * 1000000,
+                            "labels": cell.labels,
+                        }
+                        column_dict.setdefault("cells", []).append(cell_dict)
+                    family_dict.setdefault("columns", []).append(column_dict)
+                dict_val.setdefault("families", []).append(family_dict)
+            row_list.append(dict_val)
+        return row_list
+
+    @client_handler.error_safe
+    async def MutateRow(self, request, **kwargs):
+        from google.cloud.bigtable.row import DirectRow
+        table_id = request["table_name"].split("/")[-1]
+        instance = self.client.instance(self.instance_id)
+        table = instance.table(table_id)
+        row_key = request["row_key"]
+        new_row = DirectRow(row_key, table)
+        for m_dict in request.get("mutations", []):
+            if m_dict.get("set_cell"):
+                details = m_dict["set_cell"]
+                new_row.set_cell(details["family_name"], details["column_qualifier"], details["value"], timestamp=details["timestamp_micros"])
+            elif m_dict.get("delete_from_column"):
+                details = m_dict["delete_from_column"]
+                new_row.delete_cell(details["family_name"], details["column_qualifier"], timestamp=details["timestamp_micros"])
+            elif m_dict.get("delete_from_family"):
+                details = m_dict["delete_from_family"]
+                new_row.delete_cells(details["family_name"], timestamp=details["timestamp_micros"])
+            elif m_dict.get("delete_from_row"):
+                new_row.delete()
+        table.mutate_rows([new_row])
+        return "OK"
+
+    @client_handler.error_safe
+    async def BulkMutateRows(self, request, **kwargs):
+        from google.cloud.bigtable.row import DirectRow
+        table_id = request["table_name"].split("/")[-1]
+        instance = self.client.instance(self.instance_id)
+        table = instance.table(table_id)
+        rows = []
+        for entry in request.get("entries", []):
+            row_key = entry["row_key"]
+            new_row = DirectRow(row_key, table)
+            for m_dict in entry.get("mutations", []):
+                if m_dict.get("set_cell"):
+                    details = m_dict["set_cell"]
+                    new_row.set_cell(details["family_name"], details["column_qualifier"], details["value"], timestamp=details.get("timestamp_micros", None))
+                elif m_dict.get("delete_from_column"):
+                    details = m_dict["delete_from_column"]
+                    new_row.delete_cell(details["family_name"], details["column_qualifier"], timestamp=details["timestamp_micros"])
+                elif m_dict.get("delete_from_family"):
+                    details = m_dict["delete_from_family"]
+                    new_row.delete_cells(details["family_name"], timestamp=details["timestamp_micros"])
+                elif m_dict.get("delete_from_row"):
+                    new_row.delete()
+            rows.append(new_row)
+        table.mutate_rows(rows)
+        return "OK"
+
+""" +import os + +from google.cloud.environment_vars import BIGTABLE_EMULATOR +from google.cloud.bigtable.client import Client + +import client_handler_data as client_handler + +import warnings +warnings.filterwarnings("ignore", category=DeprecationWarning) + + +class LegacyTestProxyClientHandler(client_handler.TestProxyClientHandler): + + def __init__( + self, + data_target=None, + project_id=None, + instance_id=None, + app_profile_id=None, + per_operation_timeout=None, + **kwargs, + ): + self.closed = False + # use emulator + os.environ[BIGTABLE_EMULATOR] = data_target + self.client = Client(project=project_id) + self.instance_id = instance_id + self.app_profile_id = app_profile_id + self.per_operation_timeout = per_operation_timeout + + async def close(self): + self.closed = True + + @client_handler.error_safe + async def ReadRows(self, request, **kwargs): + table_id = request["table_name"].split("/")[-1] + # app_profile_id = self.app_profile_id or request.get("app_profile_id", None) + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + + limit = request.get("rows_limit", None) + start_key = request.get("rows", {}).get("row_keys", [None])[0] + end_key = request.get("rows", {}).get("row_keys", [None])[-1] + end_inclusive = request.get("rows", {}).get("row_ranges", [{}])[-1].get("end_key_closed", True) + + row_list = [] + for row in table.read_rows(start_key=start_key, end_key=end_key, limit=limit, end_inclusive=end_inclusive): + # parse results into proto formatted dict + dict_val = {"row_key": row.row_key} + for family, family_cells in row.cells.items(): + family_dict = {"name": family} + for qualifier, qualifier_cells in family_cells.items(): + column_dict = {"qualifier": qualifier} + for cell in qualifier_cells: + cell_dict = { + "value": cell.value, + "timestamp_micros": cell.timestamp.timestamp() * 1000000, + "labels": cell.labels, + } + column_dict.setdefault("cells", []).append(cell_dict) + family_dict.setdefault("columns", []).append(column_dict) + dict_val.setdefault("families", []).append(family_dict) + row_list.append(dict_val) + return row_list + + @client_handler.error_safe + async def MutateRow(self, request, **kwargs): + from google.cloud.bigtable.row import DirectRow + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + row_key = request["row_key"] + new_row = DirectRow(row_key, table) + for m_dict in request.get("mutations", []): + if m_dict.get("set_cell"): + details = m_dict["set_cell"] + new_row.set_cell(details["family_name"], details["column_qualifier"], details["value"], timestamp=details["timestamp_micros"]) + elif m_dict.get("delete_from_column"): + details = m_dict["delete_from_column"] + new_row.delete_cell(details["family_name"], details["column_qualifier"], timestamp=details["timestamp_micros"]) + elif m_dict.get("delete_from_family"): + details = m_dict["delete_from_family"] + new_row.delete_cells(details["family_name"], timestamp=details["timestamp_micros"]) + elif m_dict.get("delete_from_row"): + new_row.delete() + async with self.measure_call(): + table.mutate_rows([new_row]) + return "OK" + + @client_handler.error_safe + async def BulkMutateRows(self, request, **kwargs): + from google.cloud.bigtable.row import DirectRow + table_id = request["table_name"].split("/")[-1] + instance = self.client.instance(self.instance_id) + table = instance.table(table_id) + rows = [] + for entry in request.get("entries", []): + row_key = 
entry["row_key"] + new_row = DirectRow(row_key, table) + for m_dict in entry.get("mutations", {}): + if m_dict.get("set_cell"): + details = m_dict["set_cell"] + new_row.set_cell(details["family_name"], details["column_qualifier"], details["value"], timestamp=details.get("timestamp_micros",None)) + elif m_dict.get("delete_from_column"): + details = m_dict["delete_from_column"] + new_row.delete_cell(details["family_name"], details["column_qualifier"], timestamp=details["timestamp_micros"]) + elif m_dict.get("delete_from_family"): + details = m_dict["delete_from_family"] + new_row.delete_cells(details["family_name"], timestamp=details["timestamp_micros"]) + elif m_dict.get("delete_from_row"): + new_row.delete() + rows.append(new_row) + async with self.measure_call(): + table.mutate_rows(rows) + return "OK" + diff --git a/test_proxy/handlers/grpc_handler.py b/test_proxy/handlers/grpc_handler.py new file mode 100644 index 000000000..2c70778dd --- /dev/null +++ b/test_proxy/handlers/grpc_handler.py @@ -0,0 +1,148 @@ + +import time + +import test_proxy_pb2 +import test_proxy_pb2_grpc +import data_pb2 +import bigtable_pb2 +from google.rpc.status_pb2 import Status +from google.protobuf import json_format + + +class TestProxyGrpcServer(test_proxy_pb2_grpc.CloudBigtableV2TestProxyServicer): + """ + Implements a grpc server that proxies conformance test requests to the client library + + Due to issues with using protoc-compiled protos and client-library + proto-plus objects in the same process, this server defers requests to + matching methods in a TestProxyClientHandler instance in a separate + process. + This happens invisbly in the decorator @delegate_to_client_handler, with the + results attached to each request as a client_response kwarg + """ + + def __init__(self, request_q, queue_pool): + self.open_queues = list(range(len(queue_pool))) + self.queue_pool = queue_pool + self.request_q = request_q + + def delegate_to_client_handler(func, timeout_seconds=300): + """ + Decorator that transparently passes a request to the client + handler process, and then attaches the resonse to the wrapped call + """ + + def wrapper(self, request, context, **kwargs): + deadline = time.time() + timeout_seconds + json_dict = json_format.MessageToDict(request) + out_idx = self.open_queues.pop() + json_dict["proxy_request"] = func.__name__ + json_dict["response_queue_idx"] = out_idx + out_q = self.queue_pool[out_idx] + self.request_q.put(json_dict) + # wait for response + while time.time() < deadline: + if not out_q.empty(): + response = out_q.get() + self.open_queues.append(out_idx) + if isinstance(response, Exception): + raise response + else: + return func( + self, + request, + context, + client_response=response, + **kwargs, + ) + time.sleep(1e-4) + + return wrapper + + + @delegate_to_client_handler + def CreateClient(self, request, context, client_response=None): + return test_proxy_pb2.CreateClientResponse() + + @delegate_to_client_handler + def CloseClient(self, request, context, client_response=None): + return test_proxy_pb2.CloseClientResponse() + + @delegate_to_client_handler + def RemoveClient(self, request, context, client_response=None): + return test_proxy_pb2.RemoveClientResponse() + + @delegate_to_client_handler + def ReadRows(self, request, context, client_response=None): + status = Status() + rows = [] + if isinstance(client_response, dict) and "error" in client_response: + status = Status(code=5, message=client_response["error"]) + else: + rows = [data_pb2.Row(**d) for d in client_response] + result 
+
+    @delegate_to_client_handler
+    def ReadRow(self, request, context, client_response=None):
+        status = Status()
+        row = None
+        if isinstance(client_response, dict) and "error" in client_response:
+            status = Status(code=client_response.get("code", 5), message=client_response.get("error"))
+        elif client_response != "None":
+            row = data_pb2.Row(**client_response)
+        result = test_proxy_pb2.RowResult(row=row, status=status)
+        return result
+
+    @delegate_to_client_handler
+    def MutateRow(self, request, context, client_response=None):
+        status = Status()
+        if isinstance(client_response, dict) and "error" in client_response:
+            status = Status(code=client_response.get("code", 5), message=client_response["error"])
+        return test_proxy_pb2.MutateRowResult(status=status)
+
+    @delegate_to_client_handler
+    def BulkMutateRows(self, request, context, client_response=None):
+        status = Status()
+        entries = []
+        if isinstance(client_response, dict) and "error" in client_response:
+            entries = [bigtable_pb2.MutateRowsResponse.Entry(index=exc_dict.get("index", 1), status=Status(code=exc_dict.get("code", 5))) for exc_dict in client_response.get("subexceptions", [])]
+            if not entries:
+                # only fail the overall request when there are no per-entry failures to report
+                status = Status(code=client_response.get("code", 5), message=client_response["error"])
+        # TODO: protos were updated. entry is now entries: https://github.com/googleapis/cndb-client-testing-protos/commit/e6205a2bba04acc10d12421a1402870b4a525fb3
+        response = test_proxy_pb2.MutateRowsResult(status=status, entry=entries)
+        return response
+
+    @delegate_to_client_handler
+    def CheckAndMutateRow(self, request, context, client_response=None):
+        if isinstance(client_response, dict) and "error" in client_response:
+            status = Status(code=client_response.get("code", 5), message=client_response["error"])
+            response = test_proxy_pb2.CheckAndMutateRowResult(status=status)
+        else:
+            result = bigtable_pb2.CheckAndMutateRowResponse(predicate_matched=client_response)
+            response = test_proxy_pb2.CheckAndMutateRowResult(result=result, status=Status())
+        return response
+
+    @delegate_to_client_handler
+    def ReadModifyWriteRow(self, request, context, client_response=None):
+        status = Status()
+        row = None
+        if isinstance(client_response, dict) and "error" in client_response:
+            status = Status(code=client_response.get("code", 5), message=client_response.get("error"))
+        elif client_response != "None":
+            row = data_pb2.Row(**client_response)
+        result = test_proxy_pb2.RowResult(row=row, status=status)
+        return result
+
+    @delegate_to_client_handler
+    def SampleRowKeys(self, request, context, client_response=None):
+        status = Status()
+        sample_list = []
+        if isinstance(client_response, dict) and "error" in client_response:
+            status = Status(code=client_response.get("code", 5), message=client_response.get("error"))
+        else:
+            for sample in client_response:
+                sample_list.append(bigtable_pb2.SampleRowKeysResponse(offset_bytes=sample[1], row_key=sample[0]))
+        # TODO: protos were updated. sample is now samples: https://github.com/googleapis/cndb-client-testing-protos/commit/e6205a2bba04acc10d12421a1402870b4a525fb3
+        return test_proxy_pb2.SampleRowKeysResult(status=status, sample=sample_list)
diff --git a/test_proxy/noxfile.py b/test_proxy/noxfile.py
new file mode 100644
index 000000000..bebf247b7
--- /dev/null
+++ b/test_proxy/noxfile.py
@@ -0,0 +1,80 @@
+# Copyright 2023 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import os
+import pathlib
+
+import nox
+
+
+DEFAULT_PYTHON_VERSION = "3.10"
+
+PROXY_SERVER_PORT = os.environ.get("PROXY_SERVER_PORT", "50055")
+PROXY_CLIENT_VERSION = os.environ.get("PROXY_CLIENT_VERSION", None)
+
+CURRENT_DIRECTORY = pathlib.Path(__file__).parent.absolute()
+REPO_ROOT_DIRECTORY = CURRENT_DIRECTORY.parent
+
+nox.options.sessions = ["test_proxy", "conformance_tests"]
+
+TEST_REPO_URL = "https://github.com/googleapis/cloud-bigtable-clients-test.git"
+CLONE_REPO_DIR = "cloud-bigtable-clients-test"
+
+# Error if a python version is missing
+nox.options.error_on_missing_interpreters = True
+
+
+def default(session):
+    """
+    if nox is run directly, run the test_proxy session
+    """
+    test_proxy(session)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def conformance_tests(session):
+    """
+    download and run the conformance test suite against the test proxy
+    """
+    # download the conformance test suite
+    clone_dir = os.path.join(CURRENT_DIRECTORY, CLONE_REPO_DIR)
+    if not os.path.exists(clone_dir):
+        print("downloading copy of test repo")
+        session.run("git", "clone", TEST_REPO_URL, CLONE_REPO_DIR, external=True)
+    # start tests
+    with session.chdir(f"{clone_dir}/tests"):
+        session.run("go", "test", "-v", f"-proxy_addr=:{PROXY_SERVER_PORT}", external=True)
+
+
+@nox.session(python=DEFAULT_PYTHON_VERSION)
+def test_proxy(session):
+    """Start up the test proxy"""
+    if PROXY_CLIENT_VERSION is not None:
+        # install released version of the library
+        session.install(f"google-cloud-bigtable=={PROXY_CLIENT_VERSION}")
+    else:
+        # install the library from the source
+        session.install("-e", str(REPO_ROOT_DIRECTORY))
+        session.install("-e", str(REPO_ROOT_DIRECTORY / "python-api-core"))
+
+    session.run("python", "test_proxy.py", "--port", PROXY_SERVER_PORT, *session.posargs)
diff --git a/test_proxy/protos/bigtable_pb2.py b/test_proxy/protos/bigtable_pb2.py
new file mode 100644
index 000000000..936a4ed55
--- /dev/null
+++ b/test_proxy/protos/bigtable_pb2.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: google/bigtable/v2/bigtable.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2 +from google.api import resource_pb2 as google_dot_api_dot_resource__pb2 +from google.api import routing_pb2 as google_dot_api_dot_routing__pb2 +import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +import request_stats_pb2 as google_dot_bigtable_dot_v2_dot_request__stats__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.protobuf import timestamp_pb2 as google_dot_protobuf_dot_timestamp__pb2 +from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n!google/bigtable/v2/bigtable.proto\x12\x12google.bigtable.v2\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a\x18google/api/routing.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a&google/bigtable/v2/request_stats.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x17google/rpc/status.proto\"\x90\x03\n\x0fReadRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12(\n\x04rows\x18\x02 \x01(\x0b\x32\x1a.google.bigtable.v2.RowSet\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x12\n\nrows_limit\x18\x04 \x01(\x03\x12P\n\x12request_stats_view\x18\x06 \x01(\x0e\x32\x34.google.bigtable.v2.ReadRowsRequest.RequestStatsView\"f\n\x10RequestStatsView\x12\"\n\x1eREQUEST_STATS_VIEW_UNSPECIFIED\x10\x00\x12\x16\n\x12REQUEST_STATS_NONE\x10\x01\x12\x16\n\x12REQUEST_STATS_FULL\x10\x02\"\xb1\x03\n\x10ReadRowsResponse\x12>\n\x06\x63hunks\x18\x01 \x03(\x0b\x32..google.bigtable.v2.ReadRowsResponse.CellChunk\x12\x1c\n\x14last_scanned_row_key\x18\x02 \x01(\x0c\x12\x37\n\rrequest_stats\x18\x03 \x01(\x0b\x32 .google.bigtable.v2.RequestStats\x1a\x85\x02\n\tCellChunk\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x31\n\x0b\x66\x61mily_name\x18\x02 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12.\n\tqualifier\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.BytesValue\x12\x18\n\x10timestamp_micros\x18\x04 \x01(\x03\x12\x0e\n\x06labels\x18\x05 \x03(\t\x12\r\n\x05value\x18\x06 \x01(\x0c\x12\x12\n\nvalue_size\x18\x07 \x01(\x05\x12\x13\n\treset_row\x18\x08 \x01(\x08H\x00\x12\x14\n\ncommit_row\x18\t \x01(\x08H\x00\x42\x0c\n\nrow_status\"n\n\x14SampleRowKeysRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\">\n\x15SampleRowKeysResponse\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x14\n\x0coffset_bytes\x18\x02 \x01(\x03\"\xb6\x01\n\x10MutateRowRequest\x12>\n\ntable_name\x18\x01 
\x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x34\n\tmutations\x18\x03 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x13\n\x11MutateRowResponse\"\xfe\x01\n\x11MutateRowsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x03 \x01(\t\x12\x41\n\x07\x65ntries\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.MutateRowsRequest.EntryB\x03\xe0\x41\x02\x1aN\n\x05\x45ntry\x12\x0f\n\x07row_key\x18\x01 \x01(\x0c\x12\x34\n\tmutations\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.MutationB\x03\xe0\x41\x02\"\x8f\x01\n\x12MutateRowsResponse\x12=\n\x07\x65ntries\x18\x01 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\x1a:\n\x05\x45ntry\x12\r\n\x05index\x18\x01 \x01(\x03\x12\"\n\x06status\x18\x02 \x01(\x0b\x32\x12.google.rpc.Status\"\xae\x02\n\x18\x43heckAndMutateRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x07 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12\x37\n\x10predicate_filter\x18\x06 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x34\n\x0etrue_mutations\x18\x04 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\x12\x35\n\x0f\x66\x61lse_mutations\x18\x05 \x03(\x0b\x32\x1c.google.bigtable.v2.Mutation\"6\n\x19\x43heckAndMutateRowResponse\x12\x19\n\x11predicate_matched\x18\x01 \x01(\x08\"i\n\x12PingAndWarmRequest\x12;\n\x04name\x18\x01 \x01(\tB-\xe0\x41\x02\xfa\x41\'\n%bigtableadmin.googleapis.com/Instance\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"\x15\n\x13PingAndWarmResponse\"\xc6\x01\n\x19ReadModifyWriteRowRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x04 \x01(\t\x12\x14\n\x07row_key\x18\x02 \x01(\x0c\x42\x03\xe0\x41\x02\x12;\n\x05rules\x18\x03 \x03(\x0b\x32\'.google.bigtable.v2.ReadModifyWriteRuleB\x03\xe0\x41\x02\"B\n\x1aReadModifyWriteRowResponse\x12$\n\x03row\x18\x01 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"\x86\x01\n,GenerateInitialChangeStreamPartitionsRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\"g\n-GenerateInitialChangeStreamPartitionsResponse\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\"\x9b\x03\n\x17ReadChangeStreamRequest\x12>\n\ntable_name\x18\x01 \x01(\tB*\xe0\x41\x02\xfa\x41$\n\"bigtableadmin.googleapis.com/Table\x12\x16\n\x0e\x61pp_profile_id\x18\x02 \x01(\t\x12\x36\n\tpartition\x18\x03 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\x30\n\nstart_time\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.TimestampH\x00\x12K\n\x13\x63ontinuation_tokens\x18\x06 \x01(\x0b\x32,.google.bigtable.v2.StreamContinuationTokensH\x00\x12,\n\x08\x65nd_time\x18\x05 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x35\n\x12heartbeat_duration\x18\x07 \x01(\x0b\x32\x19.google.protobuf.DurationB\x0c\n\nstart_from\"\xeb\t\n\x18ReadChangeStreamResponse\x12N\n\x0b\x64\x61ta_change\x18\x01 \x01(\x0b\x32\x37.google.bigtable.v2.ReadChangeStreamResponse.DataChangeH\x00\x12K\n\theartbeat\x18\x02 \x01(\x0b\x32\x36.google.bigtable.v2.ReadChangeStreamResponse.HeartbeatH\x00\x12P\n\x0c\x63lose_stream\x18\x03 
\x01(\x0b\x32\x38.google.bigtable.v2.ReadChangeStreamResponse.CloseStreamH\x00\x1a\xf4\x01\n\rMutationChunk\x12X\n\nchunk_info\x18\x01 \x01(\x0b\x32\x44.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk.ChunkInfo\x12.\n\x08mutation\x18\x02 \x01(\x0b\x32\x1c.google.bigtable.v2.Mutation\x1aY\n\tChunkInfo\x12\x1a\n\x12\x63hunked_value_size\x18\x01 \x01(\x05\x12\x1c\n\x14\x63hunked_value_offset\x18\x02 \x01(\x05\x12\x12\n\nlast_chunk\x18\x03 \x01(\x08\x1a\xc6\x03\n\nDataChange\x12J\n\x04type\x18\x01 \x01(\x0e\x32<.google.bigtable.v2.ReadChangeStreamResponse.DataChange.Type\x12\x19\n\x11source_cluster_id\x18\x02 \x01(\t\x12\x0f\n\x07row_key\x18\x03 \x01(\x0c\x12\x34\n\x10\x63ommit_timestamp\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x12\n\ntiebreaker\x18\x05 \x01(\x05\x12J\n\x06\x63hunks\x18\x06 \x03(\x0b\x32:.google.bigtable.v2.ReadChangeStreamResponse.MutationChunk\x12\x0c\n\x04\x64one\x18\x08 \x01(\x08\x12\r\n\x05token\x18\t \x01(\t\x12;\n\x17\x65stimated_low_watermark\x18\n \x01(\x0b\x32\x1a.google.protobuf.Timestamp\"P\n\x04Type\x12\x14\n\x10TYPE_UNSPECIFIED\x10\x00\x12\x08\n\x04USER\x10\x01\x12\x16\n\x12GARBAGE_COLLECTION\x10\x02\x12\x10\n\x0c\x43ONTINUATION\x10\x03\x1a\x91\x01\n\tHeartbeat\x12G\n\x12\x63ontinuation_token\x18\x01 \x01(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\x12;\n\x17\x65stimated_low_watermark\x18\x02 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x1a{\n\x0b\x43loseStream\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12H\n\x13\x63ontinuation_tokens\x18\x02 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationTokenB\x0f\n\rstream_record2\xd7\x18\n\x08\x42igtable\x12\x9b\x02\n\x08ReadRows\x12#.google.bigtable.v2.ReadRowsRequest\x1a$.google.bigtable.v2.ReadRowsResponse\"\xc1\x01\x82\xd3\xe4\x93\x02>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xac\x02\n\rSampleRowKeys\x12(.google.bigtable.v2.SampleRowKeysRequest\x1a).google.bigtable.v2.SampleRowKeysResponse\"\xc3\x01\x82\xd3\xe4\x93\x02@\x12>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xc1\x02\n\tMutateRow\x12$.google.bigtable.v2.MutateRowRequest\x1a%.google.bigtable.v2.MutateRowResponse\"\xe6\x01\x82\xd3\xe4\x93\x02?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x1ctable_name,row_key,mutations\xda\x41+table_name,row_key,mutations,app_profile_id\x12\xb3\x02\n\nMutateRows\x12%.google.bigtable.v2.MutateRowsRequest\x1a&.google.bigtable.v2.MutateRowsResponse\"\xd3\x01\x82\xd3\xe4\x93\x02@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x12table_name,entries\xda\x41!table_name,entries,app_profile_id0\x01\x12\xad\x03\n\x11\x43heckAndMutateRow\x12,.google.bigtable.v2.CheckAndMutateRowRequest\x1a-.google.bigtable.v2.CheckAndMutateRowResponse\"\xba\x02\x82\xd3\xe4\x93\x02G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{tab
le_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x42table_name,row_key,predicate_filter,true_mutations,false_mutations\xda\x41Qtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id\x12\xee\x01\n\x0bPingAndWarm\x12&.google.bigtable.v2.PingAndWarmRequest\x1a\'.google.bigtable.v2.PingAndWarmResponse\"\x8d\x01\x82\xd3\xe4\x93\x02+\"&/v2/{name=projects/*/instances/*}:ping:\x01*\x8a\xd3\xe4\x93\x02\x39\x12%\n\x04name\x12\x1d{name=projects/*/instances/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x04name\xda\x41\x13name,app_profile_id\x12\xdd\x02\n\x12ReadModifyWriteRow\x12-.google.bigtable.v2.ReadModifyWriteRowRequest\x1a..google.bigtable.v2.ReadModifyWriteRowResponse\"\xe7\x01\x82\xd3\xe4\x93\x02H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\x01*\x8a\xd3\xe4\x93\x02N\x12:\n\ntable_name\x12,{table_name=projects/*/instances/*/tables/*}\x12\x10\n\x0e\x61pp_profile_id\xda\x41\x18table_name,row_key,rules\xda\x41\'table_name,row_key,rules,app_profile_id\x12\xbb\x02\n%GenerateInitialChangeStreamPartitions\x12@.google.bigtable.v2.GenerateInitialChangeStreamPartitionsRequest\x1a\x41.google.bigtable.v2.GenerateInitialChangeStreamPartitionsResponse\"\x8a\x01\x82\xd3\xe4\x93\x02[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x12\xe6\x01\n\x10ReadChangeStream\x12+.google.bigtable.v2.ReadChangeStreamRequest\x1a,.google.bigtable.v2.ReadChangeStreamResponse\"u\x82\xd3\xe4\x93\x02\x46\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\x01*\xda\x41\ntable_name\xda\x41\x19table_name,app_profile_id0\x01\x1a\xdb\x02\xca\x41\x17\x62igtable.googleapis.com\xd2\x41\xbd\x02https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-onlyB\xeb\x02\n\x16\x63om.google.bigtable.v2B\rBigtableProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2\xea\x41P\n%bigtableadmin.googleapis.com/Instance\x12\'projects/{project}/instances/{instance}\xea\x41\\\n\"bigtableadmin.googleapis.com/Table\x12\x36projects/{project}/instances/{instance}/tables/{table}b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.bigtable_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\rBigtableProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2\352AP\n%bigtableadmin.googleapis.com/Instance\022\'projects/{project}/instances/{instance}\352A\\\n\"bigtableadmin.googleapis.com/Table\0226projects/{project}/instances/{instance}/tables/{table}' + _READROWSREQUEST.fields_by_name['table_name']._options = None + _READROWSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _SAMPLEROWKEYSREQUEST.fields_by_name['table_name']._options = None + 
_SAMPLEROWKEYSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _MUTATEROWREQUEST.fields_by_name['table_name']._options = None + _MUTATEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _MUTATEROWREQUEST.fields_by_name['row_key']._options = None + _MUTATEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002' + _MUTATEROWREQUEST.fields_by_name['mutations']._options = None + _MUTATEROWREQUEST.fields_by_name['mutations']._serialized_options = b'\340A\002' + _MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations']._options = None + _MUTATEROWSREQUEST_ENTRY.fields_by_name['mutations']._serialized_options = b'\340A\002' + _MUTATEROWSREQUEST.fields_by_name['table_name']._options = None + _MUTATEROWSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _MUTATEROWSREQUEST.fields_by_name['entries']._options = None + _MUTATEROWSREQUEST.fields_by_name['entries']._serialized_options = b'\340A\002' + _CHECKANDMUTATEROWREQUEST.fields_by_name['table_name']._options = None + _CHECKANDMUTATEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _CHECKANDMUTATEROWREQUEST.fields_by_name['row_key']._options = None + _CHECKANDMUTATEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002' + _PINGANDWARMREQUEST.fields_by_name['name']._options = None + _PINGANDWARMREQUEST.fields_by_name['name']._serialized_options = b'\340A\002\372A\'\n%bigtableadmin.googleapis.com/Instance' + _READMODIFYWRITEROWREQUEST.fields_by_name['table_name']._options = None + _READMODIFYWRITEROWREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _READMODIFYWRITEROWREQUEST.fields_by_name['row_key']._options = None + _READMODIFYWRITEROWREQUEST.fields_by_name['row_key']._serialized_options = b'\340A\002' + _READMODIFYWRITEROWREQUEST.fields_by_name['rules']._options = None + _READMODIFYWRITEROWREQUEST.fields_by_name['rules']._serialized_options = b'\340A\002' + _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST.fields_by_name['table_name']._options = None + _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _READCHANGESTREAMREQUEST.fields_by_name['table_name']._options = None + _READCHANGESTREAMREQUEST.fields_by_name['table_name']._serialized_options = b'\340A\002\372A$\n\"bigtableadmin.googleapis.com/Table' + _BIGTABLE._options = None + _BIGTABLE._serialized_options = b'\312A\027bigtable.googleapis.com\322A\275\002https://www.googleapis.com/auth/bigtable.data,https://www.googleapis.com/auth/bigtable.data.readonly,https://www.googleapis.com/auth/cloud-bigtable.data,https://www.googleapis.com/auth/cloud-bigtable.data.readonly,https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/cloud-platform.read-only' + _BIGTABLE.methods_by_name['ReadRows']._options = None + _BIGTABLE.methods_by_name['ReadRows']._serialized_options = b'\202\323\344\223\002>\"9/v2/{table_name=projects/*/instances/*/tables/*}:readRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031table_name,app_profile_id' + _BIGTABLE.methods_by_name['SampleRowKeys']._options = None + 
_BIGTABLE.methods_by_name['SampleRowKeys']._serialized_options = b'\202\323\344\223\002@\022>/v2/{table_name=projects/*/instances/*/tables/*}:sampleRowKeys\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\ntable_name\332A\031table_name,app_profile_id' + _BIGTABLE.methods_by_name['MutateRow']._options = None + _BIGTABLE.methods_by_name['MutateRow']._serialized_options = b'\202\323\344\223\002?\":/v2/{table_name=projects/*/instances/*/tables/*}:mutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\034table_name,row_key,mutations\332A+table_name,row_key,mutations,app_profile_id' + _BIGTABLE.methods_by_name['MutateRows']._options = None + _BIGTABLE.methods_by_name['MutateRows']._serialized_options = b'\202\323\344\223\002@\";/v2/{table_name=projects/*/instances/*/tables/*}:mutateRows:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\022table_name,entries\332A!table_name,entries,app_profile_id' + _BIGTABLE.methods_by_name['CheckAndMutateRow']._options = None + _BIGTABLE.methods_by_name['CheckAndMutateRow']._serialized_options = b'\202\323\344\223\002G\"B/v2/{table_name=projects/*/instances/*/tables/*}:checkAndMutateRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332ABtable_name,row_key,predicate_filter,true_mutations,false_mutations\332AQtable_name,row_key,predicate_filter,true_mutations,false_mutations,app_profile_id' + _BIGTABLE.methods_by_name['PingAndWarm']._options = None + _BIGTABLE.methods_by_name['PingAndWarm']._serialized_options = b'\202\323\344\223\002+\"&/v2/{name=projects/*/instances/*}:ping:\001*\212\323\344\223\0029\022%\n\004name\022\035{name=projects/*/instances/*}\022\020\n\016app_profile_id\332A\004name\332A\023name,app_profile_id' + _BIGTABLE.methods_by_name['ReadModifyWriteRow']._options = None + _BIGTABLE.methods_by_name['ReadModifyWriteRow']._serialized_options = b'\202\323\344\223\002H\"C/v2/{table_name=projects/*/instances/*/tables/*}:readModifyWriteRow:\001*\212\323\344\223\002N\022:\n\ntable_name\022,{table_name=projects/*/instances/*/tables/*}\022\020\n\016app_profile_id\332A\030table_name,row_key,rules\332A\'table_name,row_key,rules,app_profile_id' + _BIGTABLE.methods_by_name['GenerateInitialChangeStreamPartitions']._options = None + _BIGTABLE.methods_by_name['GenerateInitialChangeStreamPartitions']._serialized_options = b'\202\323\344\223\002[\"V/v2/{table_name=projects/*/instances/*/tables/*}:generateInitialChangeStreamPartitions:\001*\332A\ntable_name\332A\031table_name,app_profile_id' + _BIGTABLE.methods_by_name['ReadChangeStream']._options = None + _BIGTABLE.methods_by_name['ReadChangeStream']._serialized_options = b'\202\323\344\223\002F\"A/v2/{table_name=projects/*/instances/*/tables/*}:readChangeStream:\001*\332A\ntable_name\332A\031table_name,app_profile_id' + _READROWSREQUEST._serialized_start=392 + _READROWSREQUEST._serialized_end=792 + _READROWSREQUEST_REQUESTSTATSVIEW._serialized_start=690 + _READROWSREQUEST_REQUESTSTATSVIEW._serialized_end=792 + _READROWSRESPONSE._serialized_start=795 + _READROWSRESPONSE._serialized_end=1228 + _READROWSRESPONSE_CELLCHUNK._serialized_start=967 + _READROWSRESPONSE_CELLCHUNK._serialized_end=1228 + _SAMPLEROWKEYSREQUEST._serialized_start=1230 + _SAMPLEROWKEYSREQUEST._serialized_end=1340 + 
_SAMPLEROWKEYSRESPONSE._serialized_start=1342 + _SAMPLEROWKEYSRESPONSE._serialized_end=1404 + _MUTATEROWREQUEST._serialized_start=1407 + _MUTATEROWREQUEST._serialized_end=1589 + _MUTATEROWRESPONSE._serialized_start=1591 + _MUTATEROWRESPONSE._serialized_end=1610 + _MUTATEROWSREQUEST._serialized_start=1613 + _MUTATEROWSREQUEST._serialized_end=1867 + _MUTATEROWSREQUEST_ENTRY._serialized_start=1789 + _MUTATEROWSREQUEST_ENTRY._serialized_end=1867 + _MUTATEROWSRESPONSE._serialized_start=1870 + _MUTATEROWSRESPONSE._serialized_end=2013 + _MUTATEROWSRESPONSE_ENTRY._serialized_start=1955 + _MUTATEROWSRESPONSE_ENTRY._serialized_end=2013 + _CHECKANDMUTATEROWREQUEST._serialized_start=2016 + _CHECKANDMUTATEROWREQUEST._serialized_end=2318 + _CHECKANDMUTATEROWRESPONSE._serialized_start=2320 + _CHECKANDMUTATEROWRESPONSE._serialized_end=2374 + _PINGANDWARMREQUEST._serialized_start=2376 + _PINGANDWARMREQUEST._serialized_end=2481 + _PINGANDWARMRESPONSE._serialized_start=2483 + _PINGANDWARMRESPONSE._serialized_end=2504 + _READMODIFYWRITEROWREQUEST._serialized_start=2507 + _READMODIFYWRITEROWREQUEST._serialized_end=2705 + _READMODIFYWRITEROWRESPONSE._serialized_start=2707 + _READMODIFYWRITEROWRESPONSE._serialized_end=2773 + _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST._serialized_start=2776 + _GENERATEINITIALCHANGESTREAMPARTITIONSREQUEST._serialized_end=2910 + _GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE._serialized_start=2912 + _GENERATEINITIALCHANGESTREAMPARTITIONSRESPONSE._serialized_end=3015 + _READCHANGESTREAMREQUEST._serialized_start=3018 + _READCHANGESTREAMREQUEST._serialized_end=3429 + _READCHANGESTREAMRESPONSE._serialized_start=3432 + _READCHANGESTREAMRESPONSE._serialized_end=4691 + _READCHANGESTREAMRESPONSE_MUTATIONCHUNK._serialized_start=3700 + _READCHANGESTREAMRESPONSE_MUTATIONCHUNK._serialized_end=3944 + _READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO._serialized_start=3855 + _READCHANGESTREAMRESPONSE_MUTATIONCHUNK_CHUNKINFO._serialized_end=3944 + _READCHANGESTREAMRESPONSE_DATACHANGE._serialized_start=3947 + _READCHANGESTREAMRESPONSE_DATACHANGE._serialized_end=4401 + _READCHANGESTREAMRESPONSE_DATACHANGE_TYPE._serialized_start=4321 + _READCHANGESTREAMRESPONSE_DATACHANGE_TYPE._serialized_end=4401 + _READCHANGESTREAMRESPONSE_HEARTBEAT._serialized_start=4404 + _READCHANGESTREAMRESPONSE_HEARTBEAT._serialized_end=4549 + _READCHANGESTREAMRESPONSE_CLOSESTREAM._serialized_start=4551 + _READCHANGESTREAMRESPONSE_CLOSESTREAM._serialized_end=4674 + _BIGTABLE._serialized_start=4694 + _BIGTABLE._serialized_end=7853 +# @@protoc_insertion_point(module_scope) diff --git a/test_proxy/protos/bigtable_pb2_grpc.py b/test_proxy/protos/bigtable_pb2_grpc.py new file mode 100644 index 000000000..9ce87d869 --- /dev/null +++ b/test_proxy/protos/bigtable_pb2_grpc.py @@ -0,0 +1,363 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +import bigtable_pb2 as google_dot_bigtable_dot_v2_dot_bigtable__pb2 + + +class BigtableStub(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ReadRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadRows', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString, + ) + self.SampleRowKeys = channel.unary_stream( + '/google.bigtable.v2.Bigtable/SampleRowKeys', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + ) + self.MutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/MutateRow', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString, + ) + self.MutateRows = channel.unary_stream( + '/google.bigtable.v2.Bigtable/MutateRows', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + ) + self.PingAndWarm = channel.unary_unary( + '/google.bigtable.v2.Bigtable/PingAndWarm', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + ) + self.GenerateInitialChangeStreamPartitions = channel.unary_stream( + '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString, + ) + self.ReadChangeStream = channel.unary_stream( + '/google.bigtable.v2.Bigtable/ReadChangeStream', + request_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString, + response_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString, + ) + + +class BigtableServicer(object): + """Service for reading from and writing to existing Bigtable tables. + """ + + def ReadRows(self, request, context): + """Streams back the contents of all requested rows in key order, optionally + applying the same Reader filter to each. Depending on their size, + rows and cells may be broken up across multiple responses, but + atomicity of each row will still be preserved. See the + ReadRowsResponse documentation for details. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Returns a sample of row keys in the table. The returned row keys will + delimit contiguous sections of the table of approximately equal size, + which can be used to break up the data for distributed tasks like + mapreduces. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Mutates a row atomically. Cells already present in the row are left + unchanged unless explicitly changed by `mutation`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRows(self, request, context): + """Mutates multiple rows in a batch. Each individual row is mutated + atomically as in MutateRow, but the entire batch is not executed + atomically. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Mutates a row atomically based on the output of a predicate Reader filter. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PingAndWarm(self, request, context): + """Warm up associated instance metadata for this connection. + This call is not required but may be useful for connection keep-alive. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Modifies a row atomically on the server. The method reads the latest + existing timestamp and value from the specified columns and writes a new + entry based on pre-defined read/modify/write rules. The new value for the + timestamp is the greater of the existing timestamp or the current server + time. The method returns the new contents of all modified cells. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GenerateInitialChangeStreamPartitions(self, request, context): + """NOTE: This API is intended to be used by Apache Beam BigtableIO. + Returns the current list of partitions that make up the table's + change stream. The union of partitions will cover the entire keyspace. + Partitions can be read with `ReadChangeStream`. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadChangeStream(self, request, context): + """NOTE: This API is intended to be used by Apache Beam BigtableIO. + Reads changes from a table's change stream. Changes will + reflect both user-initiated mutations and mutations that are caused by + garbage collection. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BigtableServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ReadRows': grpc.unary_stream_rpc_method_handler( + servicer.ReadRows, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_stream_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.SerializeToString, + ), + 'MutateRows': grpc.unary_stream_rpc_method_handler( + servicer.MutateRows, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.SerializeToString, + ), + 'PingAndWarm': grpc.unary_unary_rpc_method_handler( + servicer.PingAndWarm, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.SerializeToString, + ), + 'GenerateInitialChangeStreamPartitions': grpc.unary_stream_rpc_method_handler( + servicer.GenerateInitialChangeStreamPartitions, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.SerializeToString, + ), + 'ReadChangeStream': grpc.unary_stream_rpc_method_handler( + servicer.ReadChangeStream, + request_deserializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.FromString, + response_serializer=google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.v2.Bigtable', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class Bigtable(object): + """Service for reading from and writing to existing Bigtable tables. 
+ """ + + @staticmethod + def ReadRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/ReadRows', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadRowsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def SampleRowKeys(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/SampleRowKeys', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.SampleRowKeysResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def MutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/MutateRow', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def MutateRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/MutateRows', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.MutateRowsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CheckAndMutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/CheckAndMutateRow', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.CheckAndMutateRowResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PingAndWarm(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/PingAndWarm', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.PingAndWarmResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, 
metadata) + + @staticmethod + def ReadModifyWriteRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.v2.Bigtable/ReadModifyWriteRow', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadModifyWriteRowResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GenerateInitialChangeStreamPartitions(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/GenerateInitialChangeStreamPartitions', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.GenerateInitialChangeStreamPartitionsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReadChangeStream(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_stream(request, target, '/google.bigtable.v2.Bigtable/ReadChangeStream', + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamRequest.SerializeToString, + google_dot_bigtable_dot_v2_dot_bigtable__pb2.ReadChangeStreamResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/test_proxy/protos/data_pb2.py b/test_proxy/protos/data_pb2.py new file mode 100644 index 000000000..fff212034 --- /dev/null +++ b/test_proxy/protos/data_pb2.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: google/bigtable/v2/data.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x1dgoogle/bigtable/v2/data.proto\x12\x12google.bigtable.v2\"@\n\x03Row\x12\x0b\n\x03key\x18\x01 \x01(\x0c\x12,\n\x08\x66\x61milies\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Family\"C\n\x06\x46\x61mily\x12\x0c\n\x04name\x18\x01 \x01(\t\x12+\n\x07\x63olumns\x18\x02 \x03(\x0b\x32\x1a.google.bigtable.v2.Column\"D\n\x06\x43olumn\x12\x11\n\tqualifier\x18\x01 \x01(\x0c\x12\'\n\x05\x63\x65lls\x18\x02 \x03(\x0b\x32\x18.google.bigtable.v2.Cell\"?\n\x04\x43\x65ll\x12\x18\n\x10timestamp_micros\x18\x01 \x01(\x03\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x0e\n\x06labels\x18\x03 \x03(\t\"\x8a\x01\n\x08RowRange\x12\x1a\n\x10start_key_closed\x18\x01 \x01(\x0cH\x00\x12\x18\n\x0estart_key_open\x18\x02 \x01(\x0cH\x00\x12\x16\n\x0c\x65nd_key_open\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_key_closed\x18\x04 \x01(\x0cH\x01\x42\x0b\n\tstart_keyB\t\n\x07\x65nd_key\"L\n\x06RowSet\x12\x10\n\x08row_keys\x18\x01 \x03(\x0c\x12\x30\n\nrow_ranges\x18\x02 \x03(\x0b\x32\x1c.google.bigtable.v2.RowRange\"\xc6\x01\n\x0b\x43olumnRange\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12 \n\x16start_qualifier_closed\x18\x02 \x01(\x0cH\x00\x12\x1e\n\x14start_qualifier_open\x18\x03 \x01(\x0cH\x00\x12\x1e\n\x14\x65nd_qualifier_closed\x18\x04 \x01(\x0cH\x01\x12\x1c\n\x12\x65nd_qualifier_open\x18\x05 \x01(\x0cH\x01\x42\x11\n\x0fstart_qualifierB\x0f\n\rend_qualifier\"N\n\x0eTimestampRange\x12\x1e\n\x16start_timestamp_micros\x18\x01 \x01(\x03\x12\x1c\n\x14\x65nd_timestamp_micros\x18\x02 \x01(\x03\"\x98\x01\n\nValueRange\x12\x1c\n\x12start_value_closed\x18\x01 \x01(\x0cH\x00\x12\x1a\n\x10start_value_open\x18\x02 \x01(\x0cH\x00\x12\x1a\n\x10\x65nd_value_closed\x18\x03 \x01(\x0cH\x01\x12\x18\n\x0e\x65nd_value_open\x18\x04 \x01(\x0cH\x01\x42\r\n\x0bstart_valueB\x0b\n\tend_value\"\xdf\x08\n\tRowFilter\x12\x34\n\x05\x63hain\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.RowFilter.ChainH\x00\x12>\n\ninterleave\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.RowFilter.InterleaveH\x00\x12<\n\tcondition\x18\x03 \x01(\x0b\x32\'.google.bigtable.v2.RowFilter.ConditionH\x00\x12\x0e\n\x04sink\x18\x10 \x01(\x08H\x00\x12\x19\n\x0fpass_all_filter\x18\x11 \x01(\x08H\x00\x12\x1a\n\x10\x62lock_all_filter\x18\x12 \x01(\x08H\x00\x12\x1e\n\x14row_key_regex_filter\x18\x04 \x01(\x0cH\x00\x12\x1b\n\x11row_sample_filter\x18\x0e \x01(\x01H\x00\x12\"\n\x18\x66\x61mily_name_regex_filter\x18\x05 \x01(\tH\x00\x12\'\n\x1d\x63olumn_qualifier_regex_filter\x18\x06 \x01(\x0cH\x00\x12>\n\x13\x63olumn_range_filter\x18\x07 \x01(\x0b\x32\x1f.google.bigtable.v2.ColumnRangeH\x00\x12\x44\n\x16timestamp_range_filter\x18\x08 \x01(\x0b\x32\".google.bigtable.v2.TimestampRangeH\x00\x12\x1c\n\x12value_regex_filter\x18\t \x01(\x0cH\x00\x12<\n\x12value_range_filter\x18\x0f \x01(\x0b\x32\x1e.google.bigtable.v2.ValueRangeH\x00\x12%\n\x1b\x63\x65lls_per_row_offset_filter\x18\n \x01(\x05H\x00\x12$\n\x1a\x63\x65lls_per_row_limit_filter\x18\x0b \x01(\x05H\x00\x12\'\n\x1d\x63\x65lls_per_column_limit_filter\x18\x0c \x01(\x05H\x00\x12!\n\x17strip_value_transformer\x18\r \x01(\x08H\x00\x12!\n\x17\x61pply_label_transformer\x18\x13 
\x01(\tH\x00\x1a\x37\n\x05\x43hain\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a<\n\nInterleave\x12.\n\x07\x66ilters\x18\x01 \x03(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x1a\xad\x01\n\tCondition\x12\x37\n\x10predicate_filter\x18\x01 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x32\n\x0btrue_filter\x18\x02 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\x12\x33\n\x0c\x66\x61lse_filter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilterB\x08\n\x06\x66ilter\"\xc9\x04\n\x08Mutation\x12\x38\n\x08set_cell\x18\x01 \x01(\x0b\x32$.google.bigtable.v2.Mutation.SetCellH\x00\x12K\n\x12\x64\x65lete_from_column\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromColumnH\x00\x12K\n\x12\x64\x65lete_from_family\x18\x03 \x01(\x0b\x32-.google.bigtable.v2.Mutation.DeleteFromFamilyH\x00\x12\x45\n\x0f\x64\x65lete_from_row\x18\x04 \x01(\x0b\x32*.google.bigtable.v2.Mutation.DeleteFromRowH\x00\x1a\x61\n\x07SetCell\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x18\n\x10timestamp_micros\x18\x03 \x01(\x03\x12\r\n\x05value\x18\x04 \x01(\x0c\x1ay\n\x10\x44\x65leteFromColumn\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x36\n\ntime_range\x18\x03 \x01(\x0b\x32\".google.bigtable.v2.TimestampRange\x1a\'\n\x10\x44\x65leteFromFamily\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x1a\x0f\n\rDeleteFromRowB\n\n\x08mutation\"\x80\x01\n\x13ReadModifyWriteRule\x12\x13\n\x0b\x66\x61mily_name\x18\x01 \x01(\t\x12\x18\n\x10\x63olumn_qualifier\x18\x02 \x01(\x0c\x12\x16\n\x0c\x61ppend_value\x18\x03 \x01(\x0cH\x00\x12\x1a\n\x10increment_amount\x18\x04 \x01(\x03H\x00\x42\x06\n\x04rule\"B\n\x0fStreamPartition\x12/\n\trow_range\x18\x01 \x01(\x0b\x32\x1c.google.bigtable.v2.RowRange\"W\n\x18StreamContinuationTokens\x12;\n\x06tokens\x18\x01 \x03(\x0b\x32+.google.bigtable.v2.StreamContinuationToken\"`\n\x17StreamContinuationToken\x12\x36\n\tpartition\x18\x01 \x01(\x0b\x32#.google.bigtable.v2.StreamPartition\x12\r\n\x05token\x18\x02 \x01(\tB\xb5\x01\n\x16\x63om.google.bigtable.v2B\tDataProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'google.bigtable.v2.data_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\tDataProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' + _ROW._serialized_start=53 + _ROW._serialized_end=117 + _FAMILY._serialized_start=119 + _FAMILY._serialized_end=186 + _COLUMN._serialized_start=188 + _COLUMN._serialized_end=256 + _CELL._serialized_start=258 + _CELL._serialized_end=321 + _ROWRANGE._serialized_start=324 + _ROWRANGE._serialized_end=462 + _ROWSET._serialized_start=464 + _ROWSET._serialized_end=540 + _COLUMNRANGE._serialized_start=543 + _COLUMNRANGE._serialized_end=741 + _TIMESTAMPRANGE._serialized_start=743 + _TIMESTAMPRANGE._serialized_end=821 + _VALUERANGE._serialized_start=824 + _VALUERANGE._serialized_end=976 + _ROWFILTER._serialized_start=979 + _ROWFILTER._serialized_end=2098 + _ROWFILTER_CHAIN._serialized_start=1795 + 
_ROWFILTER_CHAIN._serialized_end=1850 + _ROWFILTER_INTERLEAVE._serialized_start=1852 + _ROWFILTER_INTERLEAVE._serialized_end=1912 + _ROWFILTER_CONDITION._serialized_start=1915 + _ROWFILTER_CONDITION._serialized_end=2088 + _MUTATION._serialized_start=2101 + _MUTATION._serialized_end=2686 + _MUTATION_SETCELL._serialized_start=2396 + _MUTATION_SETCELL._serialized_end=2493 + _MUTATION_DELETEFROMCOLUMN._serialized_start=2495 + _MUTATION_DELETEFROMCOLUMN._serialized_end=2616 + _MUTATION_DELETEFROMFAMILY._serialized_start=2618 + _MUTATION_DELETEFROMFAMILY._serialized_end=2657 + _MUTATION_DELETEFROMROW._serialized_start=2659 + _MUTATION_DELETEFROMROW._serialized_end=2674 + _READMODIFYWRITERULE._serialized_start=2689 + _READMODIFYWRITERULE._serialized_end=2817 + _STREAMPARTITION._serialized_start=2819 + _STREAMPARTITION._serialized_end=2885 + _STREAMCONTINUATIONTOKENS._serialized_start=2887 + _STREAMCONTINUATIONTOKENS._serialized_end=2974 + _STREAMCONTINUATIONTOKEN._serialized_start=2976 + _STREAMCONTINUATIONTOKEN._serialized_end=3072 +# @@protoc_insertion_point(module_scope) diff --git a/test_proxy/protos/data_pb2_grpc.py b/test_proxy/protos/data_pb2_grpc.py new file mode 100644 index 000000000..2daafffeb --- /dev/null +++ b/test_proxy/protos/data_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/test_proxy/protos/request_stats_pb2.py b/test_proxy/protos/request_stats_pb2.py new file mode 100644 index 000000000..95fcc6e0f --- /dev/null +++ b/test_proxy/protos/request_stats_pb2.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: google/bigtable/v2/request_stats.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n&google/bigtable/v2/request_stats.proto\x12\x12google.bigtable.v2\x1a\x1egoogle/protobuf/duration.proto\"\x82\x01\n\x12ReadIterationStats\x12\x17\n\x0frows_seen_count\x18\x01 \x01(\x03\x12\x1b\n\x13rows_returned_count\x18\x02 \x01(\x03\x12\x18\n\x10\x63\x65lls_seen_count\x18\x03 \x01(\x03\x12\x1c\n\x14\x63\x65lls_returned_count\x18\x04 \x01(\x03\"Q\n\x13RequestLatencyStats\x12:\n\x17\x66rontend_server_latency\x18\x01 \x01(\x0b\x32\x19.google.protobuf.Duration\"\xa1\x01\n\x11\x46ullReadStatsView\x12\x44\n\x14read_iteration_stats\x18\x01 \x01(\x0b\x32&.google.bigtable.v2.ReadIterationStats\x12\x46\n\x15request_latency_stats\x18\x02 \x01(\x0b\x32\'.google.bigtable.v2.RequestLatencyStats\"c\n\x0cRequestStats\x12\x45\n\x14\x66ull_read_stats_view\x18\x01 \x01(\x0b\x32%.google.bigtable.v2.FullReadStatsViewH\x00\x42\x0c\n\nstats_viewB\xbd\x01\n\x16\x63om.google.bigtable.v2B\x11RequestStatsProtoP\x01Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\xaa\x02\x18Google.Cloud.Bigtable.V2\xca\x02\x18Google\\Cloud\\Bigtable\\V2\xea\x02\x1bGoogle::Cloud::Bigtable::V2b\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 
'google.bigtable.v2.request_stats_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n\026com.google.bigtable.v2B\021RequestStatsProtoP\001Z:google.golang.org/genproto/googleapis/bigtable/v2;bigtable\252\002\030Google.Cloud.Bigtable.V2\312\002\030Google\\Cloud\\Bigtable\\V2\352\002\033Google::Cloud::Bigtable::V2' + _READITERATIONSTATS._serialized_start=95 + _READITERATIONSTATS._serialized_end=225 + _REQUESTLATENCYSTATS._serialized_start=227 + _REQUESTLATENCYSTATS._serialized_end=308 + _FULLREADSTATSVIEW._serialized_start=311 + _FULLREADSTATSVIEW._serialized_end=472 + _REQUESTSTATS._serialized_start=474 + _REQUESTSTATS._serialized_end=573 +# @@protoc_insertion_point(module_scope) diff --git a/test_proxy/protos/request_stats_pb2_grpc.py b/test_proxy/protos/request_stats_pb2_grpc.py new file mode 100644 index 000000000..2daafffeb --- /dev/null +++ b/test_proxy/protos/request_stats_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/test_proxy/protos/test_proxy_pb2.py b/test_proxy/protos/test_proxy_pb2.py new file mode 100644 index 000000000..8c7817b14 --- /dev/null +++ b/test_proxy/protos/test_proxy_pb2.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: test_proxy.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import builder as _builder +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import client_pb2 as google_dot_api_dot_client__pb2 +import bigtable_pb2 as google_dot_bigtable_dot_v2_dot_bigtable__pb2 +import data_pb2 as google_dot_bigtable_dot_v2_dot_data__pb2 +from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2 +from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2 + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x10test_proxy.proto\x12\x19google.bigtable.testproxy\x1a\x17google/api/client.proto\x1a!google/bigtable/v2/bigtable.proto\x1a\x1dgoogle/bigtable/v2/data.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x17google/rpc/status.proto\"\xb8\x01\n\x13\x43reateClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x13\n\x0b\x64\x61ta_target\x18\x02 \x01(\t\x12\x12\n\nproject_id\x18\x03 \x01(\t\x12\x13\n\x0binstance_id\x18\x04 \x01(\t\x12\x16\n\x0e\x61pp_profile_id\x18\x05 \x01(\t\x12\x38\n\x15per_operation_timeout\x18\x06 \x01(\x0b\x32\x19.google.protobuf.Duration\"\x16\n\x14\x43reateClientResponse\"\'\n\x12\x43loseClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x15\n\x13\x43loseClientResponse\"(\n\x13RemoveClientRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\"\x16\n\x14RemoveClientResponse\"w\n\x0eReadRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x12\n\ntable_name\x18\x04 \x01(\t\x12\x0f\n\x07row_key\x18\x02 \x01(\t\x12-\n\x06\x66ilter\x18\x03 \x01(\x0b\x32\x1d.google.bigtable.v2.RowFilter\"U\n\tRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x01(\x0b\x32\x17.google.bigtable.v2.Row\"u\n\x0fReadRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x34\n\x07request\x18\x02 
\x01(\x0b\x32#.google.bigtable.v2.ReadRowsRequest\x12\x19\n\x11\x63\x61ncel_after_rows\x18\x03 \x01(\x05\"V\n\nRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12$\n\x03row\x18\x02 \x03(\x0b\x32\x17.google.bigtable.v2.Row\"\\\n\x10MutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x35\n\x07request\x18\x02 \x01(\x0b\x32$.google.bigtable.v2.MutateRowRequest\"5\n\x0fMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\"^\n\x11MutateRowsRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x36\n\x07request\x18\x02 \x01(\x0b\x32%.google.bigtable.v2.MutateRowsRequest\"s\n\x10MutateRowsResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12;\n\x05\x65ntry\x18\x02 \x03(\x0b\x32,.google.bigtable.v2.MutateRowsResponse.Entry\"l\n\x18\x43heckAndMutateRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12=\n\x07request\x18\x02 \x01(\x0b\x32,.google.bigtable.v2.CheckAndMutateRowRequest\"|\n\x17\x43heckAndMutateRowResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12=\n\x06result\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.CheckAndMutateRowResponse\"d\n\x14SampleRowKeysRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12\x39\n\x07request\x18\x02 \x01(\x0b\x32(.google.bigtable.v2.SampleRowKeysRequest\"t\n\x13SampleRowKeysResult\x12\"\n\x06status\x18\x01 \x01(\x0b\x32\x12.google.rpc.Status\x12\x39\n\x06sample\x18\x02 \x03(\x0b\x32).google.bigtable.v2.SampleRowKeysResponse\"n\n\x19ReadModifyWriteRowRequest\x12\x11\n\tclient_id\x18\x01 \x01(\t\x12>\n\x07request\x18\x02 \x01(\x0b\x32-.google.bigtable.v2.ReadModifyWriteRowRequest2\xa4\t\n\x18\x43loudBigtableV2TestProxy\x12q\n\x0c\x43reateClient\x12..google.bigtable.testproxy.CreateClientRequest\x1a/.google.bigtable.testproxy.CreateClientResponse\"\x00\x12n\n\x0b\x43loseClient\x12-.google.bigtable.testproxy.CloseClientRequest\x1a..google.bigtable.testproxy.CloseClientResponse\"\x00\x12q\n\x0cRemoveClient\x12..google.bigtable.testproxy.RemoveClientRequest\x1a/.google.bigtable.testproxy.RemoveClientResponse\"\x00\x12\\\n\x07ReadRow\x12).google.bigtable.testproxy.ReadRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x12_\n\x08ReadRows\x12*.google.bigtable.testproxy.ReadRowsRequest\x1a%.google.bigtable.testproxy.RowsResult\"\x00\x12\x66\n\tMutateRow\x12+.google.bigtable.testproxy.MutateRowRequest\x1a*.google.bigtable.testproxy.MutateRowResult\"\x00\x12m\n\x0e\x42ulkMutateRows\x12,.google.bigtable.testproxy.MutateRowsRequest\x1a+.google.bigtable.testproxy.MutateRowsResult\"\x00\x12~\n\x11\x43heckAndMutateRow\x12\x33.google.bigtable.testproxy.CheckAndMutateRowRequest\x1a\x32.google.bigtable.testproxy.CheckAndMutateRowResult\"\x00\x12r\n\rSampleRowKeys\x12/.google.bigtable.testproxy.SampleRowKeysRequest\x1a..google.bigtable.testproxy.SampleRowKeysResult\"\x00\x12r\n\x12ReadModifyWriteRow\x12\x34.google.bigtable.testproxy.ReadModifyWriteRowRequest\x1a$.google.bigtable.testproxy.RowResult\"\x00\x1a\x34\xca\x41\x31\x62igtable-test-proxy-not-accessible.googleapis.comB6\n#com.google.cloud.bigtable.testproxyP\x01Z\r./testproxypbb\x06proto3') + +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, globals()) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'test_proxy_pb2', globals()) +if _descriptor._USE_C_DESCRIPTORS == False: + + DESCRIPTOR._options = None + DESCRIPTOR._serialized_options = b'\n#com.google.cloud.bigtable.testproxyP\001Z\r./testproxypb' + _CLOUDBIGTABLEV2TESTPROXY._options = None + _CLOUDBIGTABLEV2TESTPROXY._serialized_options = 
b'\312A1bigtable-test-proxy-not-accessible.googleapis.com' + _CREATECLIENTREQUEST._serialized_start=196 + _CREATECLIENTREQUEST._serialized_end=380 + _CREATECLIENTRESPONSE._serialized_start=382 + _CREATECLIENTRESPONSE._serialized_end=404 + _CLOSECLIENTREQUEST._serialized_start=406 + _CLOSECLIENTREQUEST._serialized_end=445 + _CLOSECLIENTRESPONSE._serialized_start=447 + _CLOSECLIENTRESPONSE._serialized_end=468 + _REMOVECLIENTREQUEST._serialized_start=470 + _REMOVECLIENTREQUEST._serialized_end=510 + _REMOVECLIENTRESPONSE._serialized_start=512 + _REMOVECLIENTRESPONSE._serialized_end=534 + _READROWREQUEST._serialized_start=536 + _READROWREQUEST._serialized_end=655 + _ROWRESULT._serialized_start=657 + _ROWRESULT._serialized_end=742 + _READROWSREQUEST._serialized_start=744 + _READROWSREQUEST._serialized_end=861 + _ROWSRESULT._serialized_start=863 + _ROWSRESULT._serialized_end=949 + _MUTATEROWREQUEST._serialized_start=951 + _MUTATEROWREQUEST._serialized_end=1043 + _MUTATEROWRESULT._serialized_start=1045 + _MUTATEROWRESULT._serialized_end=1098 + _MUTATEROWSREQUEST._serialized_start=1100 + _MUTATEROWSREQUEST._serialized_end=1194 + _MUTATEROWSRESULT._serialized_start=1196 + _MUTATEROWSRESULT._serialized_end=1311 + _CHECKANDMUTATEROWREQUEST._serialized_start=1313 + _CHECKANDMUTATEROWREQUEST._serialized_end=1421 + _CHECKANDMUTATEROWRESULT._serialized_start=1423 + _CHECKANDMUTATEROWRESULT._serialized_end=1547 + _SAMPLEROWKEYSREQUEST._serialized_start=1549 + _SAMPLEROWKEYSREQUEST._serialized_end=1649 + _SAMPLEROWKEYSRESULT._serialized_start=1651 + _SAMPLEROWKEYSRESULT._serialized_end=1767 + _READMODIFYWRITEROWREQUEST._serialized_start=1769 + _READMODIFYWRITEROWREQUEST._serialized_end=1879 + _CLOUDBIGTABLEV2TESTPROXY._serialized_start=1882 + _CLOUDBIGTABLEV2TESTPROXY._serialized_end=3070 +# @@protoc_insertion_point(module_scope) diff --git a/test_proxy/protos/test_proxy_pb2_grpc.py b/test_proxy/protos/test_proxy_pb2_grpc.py new file mode 100644 index 000000000..60214a584 --- /dev/null +++ b/test_proxy/protos/test_proxy_pb2_grpc.py @@ -0,0 +1,433 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +import test_proxy_pb2 as test__proxy__pb2 + + +class CloudBigtableV2TestProxyStub(object): + """Note that all RPCs are unary, even when the equivalent client binding call + may be streaming. This is an intentional simplification. + + Most methods have sync (default) and async variants. For async variants, + the proxy is expected to perform the async operation, then wait for results + before delivering them back to the driver client. + + Operations that may have interesting concurrency characteristics are + represented explicitly in the API (see ReadRowsRequest.cancel_after_rows). + We include such operations only when they can be meaningfully performed + through client bindings. + + Users should generally avoid setting deadlines for requests to the Proxy + because operations are not cancelable. If the deadline is set anyway, please + understand that the underlying operation will continue to be executed even + after the deadline expires. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CreateClient = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient', + request_serializer=test__proxy__pb2.CreateClientRequest.SerializeToString, + response_deserializer=test__proxy__pb2.CreateClientResponse.FromString, + ) + self.CloseClient = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient', + request_serializer=test__proxy__pb2.CloseClientRequest.SerializeToString, + response_deserializer=test__proxy__pb2.CloseClientResponse.FromString, + ) + self.RemoveClient = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient', + request_serializer=test__proxy__pb2.RemoveClientRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RemoveClientResponse.FromString, + ) + self.ReadRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow', + request_serializer=test__proxy__pb2.ReadRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RowResult.FromString, + ) + self.ReadRows = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows', + request_serializer=test__proxy__pb2.ReadRowsRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RowsResult.FromString, + ) + self.MutateRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow', + request_serializer=test__proxy__pb2.MutateRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.MutateRowResult.FromString, + ) + self.BulkMutateRows = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows', + request_serializer=test__proxy__pb2.MutateRowsRequest.SerializeToString, + response_deserializer=test__proxy__pb2.MutateRowsResult.FromString, + ) + self.CheckAndMutateRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow', + request_serializer=test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.CheckAndMutateRowResult.FromString, + ) + self.SampleRowKeys = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys', + request_serializer=test__proxy__pb2.SampleRowKeysRequest.SerializeToString, + response_deserializer=test__proxy__pb2.SampleRowKeysResult.FromString, + ) + self.ReadModifyWriteRow = channel.unary_unary( + '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow', + request_serializer=test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString, + response_deserializer=test__proxy__pb2.RowResult.FromString, + ) + + +class CloudBigtableV2TestProxyServicer(object): + """Note that all RPCs are unary, even when the equivalent client binding call + may be streaming. This is an intentional simplification. + + Most methods have sync (default) and async variants. For async variants, + the proxy is expected to perform the async operation, then wait for results + before delivering them back to the driver client. + + Operations that may have interesting concurrency characteristics are + represented explicitly in the API (see ReadRowsRequest.cancel_after_rows). + We include such operations only when they can be meaningfully performed + through client bindings. + + Users should generally avoid setting deadlines for requests to the Proxy + because operations are not cancelable. 
If the deadline is set anyway, please + understand that the underlying operation will continue to be executed even + after the deadline expires. + """ + + def CreateClient(self, request, context): + """Client management: + + Creates a client in the proxy. + Each client has its own dedicated channel(s), and can be used concurrently + and independently with other clients. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CloseClient(self, request, context): + """Closes a client in the proxy, making it not accept new requests. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def RemoveClient(self, request, context): + """Removes a client in the proxy, making it inaccessible. Client closing + should be done by CloseClient() separately. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadRow(self, request, context): + """Bigtable operations: for each operation, you should use the synchronous or + asynchronous variant of the client method based on the `use_async_method` + setting of the client instance. For starters, you can choose to implement + one variant, and return UNIMPLEMENTED status for the other. + + Reads a row with the client instance. + The result row may not be present in the response. + Callers should check for it (e.g. calling has_row() in C++). + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadRows(self, request, context): + """Reads rows with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def MutateRow(self, request, context): + """Writes a row with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def BulkMutateRows(self, request, context): + """Writes multiple rows with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CheckAndMutateRow(self, request, context): + """Performs a check-and-mutate-row operation with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SampleRowKeys(self, request, context): + """Obtains a row key sampling with the client instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReadModifyWriteRow(self, request, context): + """Performs a read-modify-write operation with the client. 
+ """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CloudBigtableV2TestProxyServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CreateClient': grpc.unary_unary_rpc_method_handler( + servicer.CreateClient, + request_deserializer=test__proxy__pb2.CreateClientRequest.FromString, + response_serializer=test__proxy__pb2.CreateClientResponse.SerializeToString, + ), + 'CloseClient': grpc.unary_unary_rpc_method_handler( + servicer.CloseClient, + request_deserializer=test__proxy__pb2.CloseClientRequest.FromString, + response_serializer=test__proxy__pb2.CloseClientResponse.SerializeToString, + ), + 'RemoveClient': grpc.unary_unary_rpc_method_handler( + servicer.RemoveClient, + request_deserializer=test__proxy__pb2.RemoveClientRequest.FromString, + response_serializer=test__proxy__pb2.RemoveClientResponse.SerializeToString, + ), + 'ReadRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadRow, + request_deserializer=test__proxy__pb2.ReadRowRequest.FromString, + response_serializer=test__proxy__pb2.RowResult.SerializeToString, + ), + 'ReadRows': grpc.unary_unary_rpc_method_handler( + servicer.ReadRows, + request_deserializer=test__proxy__pb2.ReadRowsRequest.FromString, + response_serializer=test__proxy__pb2.RowsResult.SerializeToString, + ), + 'MutateRow': grpc.unary_unary_rpc_method_handler( + servicer.MutateRow, + request_deserializer=test__proxy__pb2.MutateRowRequest.FromString, + response_serializer=test__proxy__pb2.MutateRowResult.SerializeToString, + ), + 'BulkMutateRows': grpc.unary_unary_rpc_method_handler( + servicer.BulkMutateRows, + request_deserializer=test__proxy__pb2.MutateRowsRequest.FromString, + response_serializer=test__proxy__pb2.MutateRowsResult.SerializeToString, + ), + 'CheckAndMutateRow': grpc.unary_unary_rpc_method_handler( + servicer.CheckAndMutateRow, + request_deserializer=test__proxy__pb2.CheckAndMutateRowRequest.FromString, + response_serializer=test__proxy__pb2.CheckAndMutateRowResult.SerializeToString, + ), + 'SampleRowKeys': grpc.unary_unary_rpc_method_handler( + servicer.SampleRowKeys, + request_deserializer=test__proxy__pb2.SampleRowKeysRequest.FromString, + response_serializer=test__proxy__pb2.SampleRowKeysResult.SerializeToString, + ), + 'ReadModifyWriteRow': grpc.unary_unary_rpc_method_handler( + servicer.ReadModifyWriteRow, + request_deserializer=test__proxy__pb2.ReadModifyWriteRowRequest.FromString, + response_serializer=test__proxy__pb2.RowResult.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'google.bigtable.testproxy.CloudBigtableV2TestProxy', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class CloudBigtableV2TestProxy(object): + """Note that all RPCs are unary, even when the equivalent client binding call + may be streaming. This is an intentional simplification. + + Most methods have sync (default) and async variants. For async variants, + the proxy is expected to perform the async operation, then wait for results + before delivering them back to the driver client. + + Operations that may have interesting concurrency characteristics are + represented explicitly in the API (see ReadRowsRequest.cancel_after_rows). + We include such operations only when they can be meaningfully performed + through client bindings. 
+ + Users should generally avoid setting deadlines for requests to the Proxy + because operations are not cancelable. If the deadline is set anyway, please + understand that the underlying operation will continue to be executed even + after the deadline expires. + """ + + @staticmethod + def CreateClient(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CreateClient', + test__proxy__pb2.CreateClientRequest.SerializeToString, + test__proxy__pb2.CreateClientResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CloseClient(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CloseClient', + test__proxy__pb2.CloseClientRequest.SerializeToString, + test__proxy__pb2.CloseClientResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def RemoveClient(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/RemoveClient', + test__proxy__pb2.RemoveClientRequest.SerializeToString, + test__proxy__pb2.RemoveClientResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReadRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRow', + test__proxy__pb2.ReadRowRequest.SerializeToString, + test__proxy__pb2.RowResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReadRows(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadRows', + test__proxy__pb2.ReadRowsRequest.SerializeToString, + test__proxy__pb2.RowsResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def MutateRow(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/MutateRow', + test__proxy__pb2.MutateRowRequest.SerializeToString, + test__proxy__pb2.MutateRowResult.FromString, + options, channel_credentials, + insecure, call_credentials, compression, 
wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def BulkMutateRows(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/BulkMutateRows',
+            test__proxy__pb2.MutateRowsRequest.SerializeToString,
+            test__proxy__pb2.MutateRowsResult.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def CheckAndMutateRow(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/CheckAndMutateRow',
+            test__proxy__pb2.CheckAndMutateRowRequest.SerializeToString,
+            test__proxy__pb2.CheckAndMutateRowResult.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def SampleRowKeys(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/SampleRowKeys',
+            test__proxy__pb2.SampleRowKeysRequest.SerializeToString,
+            test__proxy__pb2.SampleRowKeysResult.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
+
+    @staticmethod
+    def ReadModifyWriteRow(request,
+            target,
+            options=(),
+            channel_credentials=None,
+            call_credentials=None,
+            insecure=False,
+            compression=None,
+            wait_for_ready=None,
+            timeout=None,
+            metadata=None):
+        return grpc.experimental.unary_unary(request, target, '/google.bigtable.testproxy.CloudBigtableV2TestProxy/ReadModifyWriteRow',
+            test__proxy__pb2.ReadModifyWriteRowRequest.SerializeToString,
+            test__proxy__pb2.RowResult.FromString,
+            options, channel_credentials,
+            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
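For reviewers unfamiliar with the generated surface, here is a hypothetical driver-side sketch (not part of this change) of how a conformance test driver could exercise the proxy through this stub. It assumes a proxy listening on localhost:50055 (the port used by `run_tests.sh` below), a Bigtable emulator reachable at localhost:8086, and `test_proxy/protos` on `sys.path`; the client, project, instance, and table names are placeholders.

```
import grpc

import test_proxy_pb2
import test_proxy_pb2_grpc

# open a plaintext channel to the locally running test proxy
channel = grpc.insecure_channel("localhost:50055")
stub = test_proxy_pb2_grpc.CloudBigtableV2TestProxyStub(channel)

# register a client instance in the proxy, pointed at a local emulator
stub.CreateClient(test_proxy_pb2.CreateClientRequest(
    client_id="client-0",
    data_target="localhost:8086",
    project_id="project",
    instance_id="instance",
))

# read one row through the registered client; failures are reported
# in result.status rather than raised as exceptions
result = stub.ReadRow(test_proxy_pb2.ReadRowRequest(
    client_id="client-0",
    table_name="projects/project/instances/instance/tables/table",
    row_key="row-key",
))
print(result.status, result.row)
```

This is roughly what the Go suite in cloud-bigtable-clients-test does over the wire for each test case.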
-d "cloud-bigtable-clients-test" ]; then + git clone https://github.com/googleapis/cloud-bigtable-clients-test.git +fi + +# start proxy +python test_proxy.py --port $PROXY_SERVER_PORT & +PROXY_PID=$! +function finish { + kill $PROXY_PID +} +trap finish EXIT + +# run tests +pushd cloud-bigtable-clients-test/tests +go test -v -proxy_addr=:$PROXY_SERVER_PORT diff --git a/test_proxy/test_proxy.py b/test_proxy/test_proxy.py new file mode 100644 index 000000000..a0cf2f1f0 --- /dev/null +++ b/test_proxy/test_proxy.py @@ -0,0 +1,193 @@ +# Copyright 2023 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +""" +The Python implementation of the `cloud-bigtable-clients-test` proxy server. + +https://github.com/googleapis/cloud-bigtable-clients-test + +This server is intended to be used to test the correctness of Bigtable +clients across languages. + +Contributor Note: the proxy implementation is split across TestProxyClientHandler +and TestProxyGrpcServer. This is due to the fact that generated protos and proto-plus +objects cannot be used in the same process, so we had to make use of the +multiprocessing module to allow them to work together. +""" + +import multiprocessing +import argparse +import sys +import os +sys.path.append("handlers") + + +def grpc_server_process(request_q, queue_pool, port=50055): + """ + Defines a process that hosts a grpc server + proxies requests to a client_handler_process + """ + sys.path.append("protos") + from concurrent import futures + + import grpc + import test_proxy_pb2_grpc + import grpc_handler + + # Start gRPC server + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10)) + test_proxy_pb2_grpc.add_CloudBigtableV2TestProxyServicer_to_server( + grpc_handler.TestProxyGrpcServer(request_q, queue_pool), server + ) + server.add_insecure_port("[::]:" + port) + server.start() + print("grpc_server_process started, listening on " + port) + server.wait_for_termination() + + +async def client_handler_process_async(request_q, queue_pool, use_legacy_client=False): + """ + Defines a process that recives Bigtable requests from a grpc_server_process, + and runs the request using a client library instance + """ + import base64 + import re + import asyncio + import warnings + import client_handler_data + import client_handler_legacy + warnings.filterwarnings("ignore", category=RuntimeWarning, message=".*Bigtable emulator.*") + + def camel_to_snake(str): + return re.sub(r"(?