From 551f8b1f53cb96a614d2b623e71f43d83b867eaf Mon Sep 17 00:00:00 2001 From: Igor Berntein Date: Wed, 15 May 2024 12:10:12 -0400 Subject: [PATCH 1/3] doc: add samples for filtering using async apis --- .../snippets/filters/filter_snippets_async.py | 335 ++++++++++++++++ .../filters/filter_snippets_async_test.py | 356 ++++++++++++++++++ 2 files changed, 691 insertions(+) create mode 100644 samples/snippets/filters/filter_snippets_async.py create mode 100644 samples/snippets/filters/filter_snippets_async_test.py diff --git a/samples/snippets/filters/filter_snippets_async.py b/samples/snippets/filters/filter_snippets_async.py new file mode 100644 index 000000000..76f2c0dd1 --- /dev/null +++ b/samples/snippets/filters/filter_snippets_async.py @@ -0,0 +1,335 @@ +# Copyright 2024, Google LLC +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import datetime +from google.cloud.bigtable.data import Row +from google.cloud._helpers import _datetime_from_microseconds + + +# [START bigtable_filters_limit_row_sample] +async def filter_limit_row_sample(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.RowSampleFilter(0.75)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_row_sample] +# [START bigtable_filters_limit_row_regex] +async def filter_limit_row_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8"))) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_row_regex] +# [START bigtable_filters_limit_cells_per_col] +async def filter_limit_cells_per_col(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.CellsColumnLimitFilter(2)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_cells_per_col] +# [START bigtable_filters_limit_cells_per_row] +async def filter_limit_cells_per_row(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.CellsRowLimitFilter(2)) + + async with 
BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_cells_per_row] +# [START bigtable_filters_limit_cells_per_row_offset] +async def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.CellsRowOffsetFilter(2)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_cells_per_row_offset] +# [START bigtable_filters_limit_col_family_regex] +async def filter_limit_col_family_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8")) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_col_family_regex] +# [START bigtable_filters_limit_col_qualifier_regex] +async def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ColumnQualifierRegexFilter("connected_.*$".encode("utf-8")) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_col_qualifier_regex] +# [START bigtable_filters_limit_col_range] +async def filter_limit_col_range(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ColumnRangeFilter( + "cell_plan", b"data_plan_01gb", b"data_plan_10gb", inclusive_end=False + ) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_col_range] +# [START bigtable_filters_limit_value_range] +async def filter_limit_value_range(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406") + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_value_range] +# [START bigtable_filters_limit_value_regex] + + +async def filter_limit_value_regex(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import 
row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8")) + ) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_value_regex] +# [START bigtable_filters_limit_timestamp_range] +async def filter_limit_timestamp_range(project_id, instance_id, table_id): + import datetime + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + end = datetime.datetime(2019, 5, 1) + + query = ReadRowsQuery(row_filter=row_filters.TimestampRangeFilter(end=end)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_timestamp_range] +# [START bigtable_filters_limit_block_all] +async def filter_limit_block_all(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.BlockAllFilter(True)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_block_all] +# [START bigtable_filters_limit_pass_all] +async def filter_limit_pass_all(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.PassAllFilter(True)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_limit_pass_all] +# [START bigtable_filters_modify_strip_value] +async def filter_modify_strip_value(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.StripValueTransformerFilter(True)) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_modify_strip_value] +# [START bigtable_filters_modify_apply_label] +async def filter_modify_apply_label(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery(row_filter=row_filters.ApplyLabelFilter(label="labelled")) + + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) + + +# [END bigtable_filters_modify_apply_label] +# [START bigtable_filters_composing_chain] +async def filter_composing_chain(project_id, instance_id, table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + 
row_filter=row_filters.RowFilterChain(
+      filters=[
+        row_filters.CellsColumnLimitFilter(1),
+        row_filters.FamilyNameRegexFilter("cell_plan"),
+      ]
+    )
+  )
+
+  async with BigtableDataClientAsync(project=project_id) as client:
+    async with client.get_table(instance_id, table_id) as table:
+      for row in await table.read_rows(query):
+        print_row(row)
+
+
+# [END bigtable_filters_composing_chain]
+# [START bigtable_filters_composing_interleave]
+async def filter_composing_interleave(project_id, instance_id, table_id):
+  from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
+  from google.cloud.bigtable.data import row_filters
+
+  query = ReadRowsQuery(
+    row_filter=row_filters.RowFilterUnion(
+      filters=[
+        row_filters.ValueRegexFilter("true"),
+        row_filters.ColumnQualifierRegexFilter("os_build"),
+      ]
+    )
+  )
+
+  async with BigtableDataClientAsync(project=project_id) as client:
+    async with client.get_table(instance_id, table_id) as table:
+      for row in await table.read_rows(query):
+        print_row(row)
+
+
+# [END bigtable_filters_composing_interleave]
+# [START bigtable_filters_composing_condition]
+async def filter_composing_condition(project_id, instance_id, table_id):
+  from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
+  from google.cloud.bigtable.data import row_filters
+
+  query = ReadRowsQuery(
+    row_filter=row_filters.ConditionalRowFilter(
+      predicate_filter=row_filters.RowFilterChain(
+        filters=[
+          row_filters.ColumnQualifierRegexFilter("data_plan_10gb"),
+          row_filters.ValueRegexFilter("true"), ]
+      ),
+      true_filter=row_filters.ApplyLabelFilter(label="passed-filter"),
+      false_filter=row_filters.ApplyLabelFilter(label="filtered-out"),
+    )
+  )
+
+  async with BigtableDataClientAsync(project=project_id) as client:
+    async with client.get_table(instance_id, table_id) as table:
+      for row in await table.read_rows(query):
+        print_row(row)
+
+
+# [END bigtable_filters_composing_condition]
+
+
+def print_row(row: Row):
+  print("Reading data for {}:".format(row.row_key.decode("utf-8")))
+  last_family = None
+  for cell in row.cells:
+    if last_family != cell.family:
+      print("Column Family {}".format(cell.family))
+      last_family = cell.family
+
+    labels = (
+      " [{}]".format(",".join(cell.labels)) if len(cell.labels) else ""
+    )
+    print(
+      "\t{}: {} @{}{}".format(
+        cell.qualifier.decode("utf-8"),
+        cell.value.decode("utf-8"),
+        _datetime_from_microseconds(cell.timestamp_micros),
+        labels,
+      )
+    )
+  print("")
diff --git a/samples/snippets/filters/filter_snippets_async_test.py b/samples/snippets/filters/filter_snippets_async_test.py
new file mode 100644
index 000000000..d62a8ebca
--- /dev/null
+++ b/samples/snippets/filters/filter_snippets_async_test.py
@@ -0,0 +1,356 @@
+# Copyright 2020, Google LLC
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import datetime
+import os
+import time
+
+import inspect
+from typing import AsyncGenerator
+
+import pytest
+import pytest_asyncio
+from .snapshots.snap_filters_test import snapshots
+
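The test module below drives each async snippet under pytest-asyncio. Outside of a test runner, the same coroutines can be run with a plain asyncio entry point; a minimal sketch, assuming the same GOOGLE_CLOUD_PROJECT and BIGTABLE_INSTANCE environment variables the tests use and a hypothetical pre-populated table named "mobile-time-series-test":

    import asyncio
    import os

    from filter_snippets_async import filter_limit_row_sample

    # The env var names and table name are assumptions for this sketch;
    # any existing Bigtable table with data would do.
    asyncio.run(
        filter_limit_row_sample(
            os.environ["GOOGLE_CLOUD_PROJECT"],
            os.environ["BIGTABLE_INSTANCE"],
            "mobile-time-series-test",
        )
    )

+from . 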
import filter_snippets_async +from google.cloud._helpers import _microseconds_from_datetime, \ + _datetime_from_microseconds + +PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] +BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] +TABLE_ID_PREFIX = "mobile-time-series-{}" + + +@pytest_asyncio.fixture +async def table_id() -> AsyncGenerator[str, None]: + table_id = _create_table() + await _populate_table(table_id) + yield table_id + _delete_table(table_id) + + +def _create_table(): + from google.cloud import bigtable + import uuid + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + + table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) + table = instance.table(table_id) + if table.exists(): + table.delete() + + table.create(column_families={"stats_summary": None, "cell_plan": None}) + return table_id + + +def _delete_table(table_id: str): + from google.cloud import bigtable + + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(table_id) + table.delete() + + +async def _populate_table(table_id): + from google.cloud.bigtable.data import BigtableDataClientAsync, \ + RowMutationEntry, SetCell + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = timestamp - datetime.timedelta(hours=1) + + async with (BigtableDataClientAsync(project=PROJECT) as client): + async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: + async with table.mutations_batcher() as batcher: + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190501", + [ + SetCell("stats_summary", "connected_cell", 1, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "connected_cell", 1, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "connected_wifi", 1, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "os_build", "PQ2A.190405.003", + _microseconds_from_datetime(timestamp)), + SetCell("cell_plan", "data_plan_01gb", "true", + _microseconds_from_datetime(timestamp_minus_hr)), + SetCell("cell_plan", "data_plan_01gb", "false", + _microseconds_from_datetime(timestamp)), + SetCell("cell_plan", "data_plan_05gb", "true", + _microseconds_from_datetime(timestamp)), + ])) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190502", + [ + SetCell("stats_summary", "connected_cell", 1, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "connected_wifi", 1, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "os_build", "PQ2A.190405.004", + _microseconds_from_datetime(timestamp)), + SetCell("cell_plan", "data_plan_05gb", "true", + _microseconds_from_datetime(timestamp)), + ] + )) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190505", + [ + SetCell("stats_summary", "connected_cell", 0, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "connected_wifi", 1, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "os_build", "PQ2A.190406.000", + _microseconds_from_datetime(timestamp)), + SetCell("cell_plan", "data_plan_05gb", "true", + _microseconds_from_datetime(timestamp)), + ] + ) + ) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190501", + [ + SetCell("stats_summary", "connected_cell", 1, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "connected_wifi", 1, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "os_build", "PQ2A.190401.002", + 
_microseconds_from_datetime(timestamp)), + SetCell("cell_plan", "data_plan_10gb", "true", + _microseconds_from_datetime(timestamp)), + ] + )) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190502", + [ + SetCell("stats_summary", "connected_cell", 1, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "connected_wifi", 0, + _microseconds_from_datetime(timestamp)), + SetCell("stats_summary", "os_build", "PQ2A.190406.000", + _microseconds_from_datetime(timestamp)), + SetCell("cell_plan", "data_plan_10gb", "true", + _microseconds_from_datetime(timestamp)), + ] + )) + + +def _datetime_to_micros(value: datetime.datetime) -> int: + """Uses the same conversion rules as the old client in""" + if not value.tzinfo: + value = value.replace(tzinfo=datetime.timezone.utc) + # Regardless of what timezone is on the value, convert it to UTC. + value = value.astimezone(datetime.timezone.utc) + # Convert the datetime to a microsecond timestamp. + return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond + return int(dt.timestamp() * 1000 * 1000) + + +@pytest.mark.asyncio +async def test_filter_limit_row_sample(capsys, table_id): + await filter_snippets_async.filter_limit_row_sample(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + assert "Reading data for" in out + + +@pytest.mark.asyncio +async def test_filter_limit_row_regex(capsys, table_id): + await filter_snippets_async.filter_limit_row_regex(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_cells_per_col(capsys, table_id): + await filter_snippets_async.filter_limit_cells_per_col(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_cells_per_row(capsys, table_id): + await filter_snippets_async.filter_limit_cells_per_row(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_cells_per_row_offset(capsys, table_id): + await filter_snippets_async.filter_limit_cells_per_row_offset( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_col_family_regex(capsys, table_id): + await filter_snippets_async.filter_limit_col_family_regex(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_col_qualifier_regex(capsys, table_id): + await filter_snippets_async.filter_limit_col_qualifier_regex( + PROJECT, BIGTABLE_INSTANCE, table_id + ) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_col_range(capsys, table_id): + await filter_snippets_async.filter_limit_col_range(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def 
test_filter_limit_value_range(capsys, table_id): + await filter_snippets_async.filter_limit_value_range(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_value_regex(capsys, table_id): + await filter_snippets_async.filter_limit_value_regex(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_timestamp_range(capsys, table_id): + await filter_snippets_async.filter_limit_timestamp_range(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_block_all(capsys, table_id): + await filter_snippets_async.filter_limit_block_all(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_limit_pass_all(capsys, table_id): + await filter_snippets_async.filter_limit_pass_all(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_modify_strip_value(capsys, table_id): + await filter_snippets_async.filter_modify_strip_value(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_modify_apply_label(capsys, table_id): + await filter_snippets_async.filter_modify_apply_label(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_composing_chain(capsys, table_id): + await filter_snippets_async.filter_composing_chain(PROJECT, BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_composing_interleave(capsys, table_id): + await filter_snippets_async.filter_composing_interleave(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected + + +@pytest.mark.asyncio +async def test_filter_composing_condition(capsys, table_id): + await filter_snippets_async.filter_composing_condition(PROJECT, + BIGTABLE_INSTANCE, + table_id) + + out, _ = capsys.readouterr() + expected = snapshots[inspect.currentframe().f_code.co_name] + assert out == expected From dcdf643957df6d07fbd351cad6c64fbb60df4127 Mon Sep 17 00:00:00 2001 From: Igor Berntein Date: Wed, 15 May 2024 12:17:05 -0400 Subject: [PATCH 2/3] format --- .../snippets/filters/filter_snippets_async.py | 368 ++++++------ .../filters/filter_snippets_async_test.py | 539 +++++++++++------- 2 files changed, 508 insertions(+), 399 deletions(-) diff --git a/samples/snippets/filters/filter_snippets_async.py b/samples/snippets/filters/filter_snippets_async.py index 76f2c0dd1..aec4b22cd 100644 --- a/samples/snippets/filters/filter_snippets_async.py +++ 
b/samples/snippets/filters/filter_snippets_async.py @@ -18,138 +18,141 @@ # [START bigtable_filters_limit_row_sample] async def filter_limit_row_sample(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery(row_filter=row_filters.RowSampleFilter(0.75)) + query = ReadRowsQuery(row_filter=row_filters.RowSampleFilter(0.75)) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_row_sample] # [START bigtable_filters_limit_row_regex] async def filter_limit_row_regex(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery( - row_filter=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8"))) + query = ReadRowsQuery( + row_filter=row_filters.RowKeyRegexFilter(".*#20190501$".encode("utf-8")) + ) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_row_regex] # [START bigtable_filters_limit_cells_per_col] async def filter_limit_cells_per_col(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery(row_filter=row_filters.CellsColumnLimitFilter(2)) + query = ReadRowsQuery(row_filter=row_filters.CellsColumnLimitFilter(2)) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_cells_per_col] # [START bigtable_filters_limit_cells_per_row] async def filter_limit_cells_per_row(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery(row_filter=row_filters.CellsRowLimitFilter(2)) + query = ReadRowsQuery(row_filter=row_filters.CellsRowLimitFilter(2)) - async with 
BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_cells_per_row] # [START bigtable_filters_limit_cells_per_row_offset] async def filter_limit_cells_per_row_offset(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery(row_filter=row_filters.CellsRowOffsetFilter(2)) + query = ReadRowsQuery(row_filter=row_filters.CellsRowOffsetFilter(2)) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_cells_per_row_offset] # [START bigtable_filters_limit_col_family_regex] async def filter_limit_col_family_regex(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery( - row_filter=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8")) - ) + query = ReadRowsQuery( + row_filter=row_filters.FamilyNameRegexFilter("stats_.*$".encode("utf-8")) + ) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_col_family_regex] # [START bigtable_filters_limit_col_qualifier_regex] async def filter_limit_col_qualifier_regex(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery( - row_filter=row_filters.ColumnQualifierRegexFilter("connected_.*$".encode("utf-8")) - ) + query = ReadRowsQuery( + row_filter=row_filters.ColumnQualifierRegexFilter( + "connected_.*$".encode("utf-8") + ) + ) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_col_qualifier_regex] # [START bigtable_filters_limit_col_range] async def 
filter_limit_col_range(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery( - row_filter=row_filters.ColumnRangeFilter( - "cell_plan", b"data_plan_01gb", b"data_plan_10gb", inclusive_end=False + query = ReadRowsQuery( + row_filter=row_filters.ColumnRangeFilter( + "cell_plan", b"data_plan_01gb", b"data_plan_10gb", inclusive_end=False + ) ) - ) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_col_range] # [START bigtable_filters_limit_value_range] async def filter_limit_value_range(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery( - row_filter=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406") - ) + query = ReadRowsQuery( + row_filter=row_filters.ValueRangeFilter(b"PQ2A.190405", b"PQ2A.190406") + ) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_value_range] @@ -157,156 +160,157 @@ async def filter_limit_value_range(project_id, instance_id, table_id): async def filter_limit_value_regex(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery( - row_filter=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8")) - ) + query = ReadRowsQuery( + row_filter=row_filters.ValueRegexFilter("PQ2A.*$".encode("utf-8")) + ) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_value_regex] # [START bigtable_filters_limit_timestamp_range] async def filter_limit_timestamp_range(project_id, instance_id, table_id): - import datetime - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + import datetime + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - end = 
datetime.datetime(2019, 5, 1) + end = datetime.datetime(2019, 5, 1) - query = ReadRowsQuery(row_filter=row_filters.TimestampRangeFilter(end=end)) + query = ReadRowsQuery(row_filter=row_filters.TimestampRangeFilter(end=end)) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_timestamp_range] # [START bigtable_filters_limit_block_all] async def filter_limit_block_all(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery(row_filter=row_filters.BlockAllFilter(True)) + query = ReadRowsQuery(row_filter=row_filters.BlockAllFilter(True)) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_block_all] # [START bigtable_filters_limit_pass_all] async def filter_limit_pass_all(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery(row_filter=row_filters.PassAllFilter(True)) + query = ReadRowsQuery(row_filter=row_filters.PassAllFilter(True)) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_limit_pass_all] # [START bigtable_filters_modify_strip_value] async def filter_modify_strip_value(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery(row_filter=row_filters.StripValueTransformerFilter(True)) + query = ReadRowsQuery(row_filter=row_filters.StripValueTransformerFilter(True)) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_modify_strip_value] # [START bigtable_filters_modify_apply_label] async 
def filter_modify_apply_label(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters - query = ReadRowsQuery(row_filter=row_filters.ApplyLabelFilter(label="labelled")) + query = ReadRowsQuery(row_filter=row_filters.ApplyLabelFilter(label="labelled")) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_modify_apply_label] # [START bigtable_filters_composing_chain] async def filter_composing_chain(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters - - query = ReadRowsQuery( - row_filter=row_filters.RowFilterChain( - filters=[ - row_filters.CellsColumnLimitFilter(1), - row_filters.FamilyNameRegexFilter("cell_plan"), - ] + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.RowFilterChain( + filters=[ + row_filters.CellsColumnLimitFilter(1), + row_filters.FamilyNameRegexFilter("cell_plan"), + ] + ) ) - ) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_composing_chain] # [START bigtable_filters_composing_interleave] async def filter_composing_interleave(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters - - query = ReadRowsQuery( - row_filter=row_filters.RowFilterUnion( - filters=[ - row_filters.ValueRegexFilter("true"), - row_filters.ColumnQualifierRegexFilter("os_build"), - ] + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.RowFilterUnion( + filters=[ + row_filters.ValueRegexFilter("true"), + row_filters.ColumnQualifierRegexFilter("os_build"), + ] + ) ) - ) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_composing_interleave] # [START bigtable_filters_composing_condition] async def filter_composing_condition(project_id, instance_id, table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery - from google.cloud.bigtable.data import row_filters - - query = ReadRowsQuery( - 
row_filter=row_filters.ConditionalRowFilter( - predicate_filter=row_filters.RowFilterChain( - filters=[ - row_filters.ColumnQualifierRegexFilter("data_plan_10gb"), - row_filters.ValueRegexFilter("true"), ] - ), - true_filter=row_filters.ApplyLabelFilter(label="passed-filter"), - false_filter=row_filters.ApplyLabelFilter(label="filtered-out"), + from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery + from google.cloud.bigtable.data import row_filters + + query = ReadRowsQuery( + row_filter=row_filters.ConditionalRowFilter( + predicate_filter=row_filters.RowFilterChain( + filters=[ + row_filters.ColumnQualifierRegexFilter("data_plan_10gb"), + row_filters.ValueRegexFilter("true"), + ] + ), + true_filter=row_filters.ApplyLabelFilter(label="passed-filter"), + false_filter=row_filters.ApplyLabelFilter(label="filtered-out"), + ) ) - ) - async with BigtableDataClientAsync(project=project_id) as client: - async with client.get_table(instance_id, table_id) as table: - for row in await table.read_rows(query): - print_row(row) + async with BigtableDataClientAsync(project=project_id) as client: + async with client.get_table(instance_id, table_id) as table: + for row in await table.read_rows(query): + print_row(row) # [END bigtable_filters_composing_condition] @@ -314,22 +318,20 @@ async def filter_composing_condition(project_id, instance_id, table_id): def print_row(row: Row): - print("Reading data for {}:".format(row.row_key.decode("utf-8"))) - last_family = None - for cell in row.cells: - if last_family != cell.family: - print("Column Family {}".format(cell.family)) - last_family = cell.family - - labels = ( - " [{}]".format(",".join(cell.labels)) if len(cell.labels) else "" - ) - print( - "\t{}: {} @{}{}".format( - cell.qualifier.decode("utf-8"), - cell.value.decode("utf-8"), - _datetime_from_microseconds(cell.timestamp_micros), - labels, - ) - ) - print("") + print("Reading data for {}:".format(row.row_key.decode("utf-8"))) + last_family = None + for cell in row.cells: + if last_family != cell.family: + print("Column Family {}".format(cell.family)) + last_family = cell.family + + labels = " [{}]".format(",".join(cell.labels)) if len(cell.labels) else "" + print( + "\t{}: {} @{}{}".format( + cell.qualifier.decode("utf-8"), + cell.value.decode("utf-8"), + _datetime_from_microseconds(cell.timestamp_micros), + labels, + ) + ) + print("") diff --git a/samples/snippets/filters/filter_snippets_async_test.py b/samples/snippets/filters/filter_snippets_async_test.py index d62a8ebca..18c93102d 100644 --- a/samples/snippets/filters/filter_snippets_async_test.py +++ b/samples/snippets/filters/filter_snippets_async_test.py @@ -24,8 +24,10 @@ from .snapshots.snap_filters_test import snapshots from . 
import filter_snippets_async -from google.cloud._helpers import _microseconds_from_datetime, \ - _datetime_from_microseconds +from google.cloud._helpers import ( + _microseconds_from_datetime, + _datetime_from_microseconds, +) PROJECT = os.environ["GOOGLE_CLOUD_PROJECT"] BIGTABLE_INSTANCE = os.environ["BIGTABLE_INSTANCE"] @@ -34,323 +36,428 @@ @pytest_asyncio.fixture async def table_id() -> AsyncGenerator[str, None]: - table_id = _create_table() - await _populate_table(table_id) - yield table_id - _delete_table(table_id) + table_id = _create_table() + await _populate_table(table_id) + yield table_id + _delete_table(table_id) def _create_table(): - from google.cloud import bigtable - import uuid + from google.cloud import bigtable + import uuid - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) - table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) - table = instance.table(table_id) - if table.exists(): - table.delete() + table_id = TABLE_ID_PREFIX.format(str(uuid.uuid4())[:16]) + table = instance.table(table_id) + if table.exists(): + table.delete() - table.create(column_families={"stats_summary": None, "cell_plan": None}) - return table_id + table.create(column_families={"stats_summary": None, "cell_plan": None}) + return table_id def _delete_table(table_id: str): - from google.cloud import bigtable + from google.cloud import bigtable - client = bigtable.Client(project=PROJECT, admin=True) - instance = client.instance(BIGTABLE_INSTANCE) - table = instance.table(table_id) - table.delete() + client = bigtable.Client(project=PROJECT, admin=True) + instance = client.instance(BIGTABLE_INSTANCE) + table = instance.table(table_id) + table.delete() async def _populate_table(table_id): - from google.cloud.bigtable.data import BigtableDataClientAsync, \ - RowMutationEntry, SetCell - - timestamp = datetime.datetime(2019, 5, 1) - timestamp_minus_hr = timestamp - datetime.timedelta(hours=1) - - async with (BigtableDataClientAsync(project=PROJECT) as client): - async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: - async with table.mutations_batcher() as batcher: - await batcher.append( - RowMutationEntry( - "phone#4c410523#20190501", - [ - SetCell("stats_summary", "connected_cell", 1, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "connected_cell", 1, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "connected_wifi", 1, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "os_build", "PQ2A.190405.003", - _microseconds_from_datetime(timestamp)), - SetCell("cell_plan", "data_plan_01gb", "true", - _microseconds_from_datetime(timestamp_minus_hr)), - SetCell("cell_plan", "data_plan_01gb", "false", - _microseconds_from_datetime(timestamp)), - SetCell("cell_plan", "data_plan_05gb", "true", - _microseconds_from_datetime(timestamp)), - ])) - await batcher.append( - RowMutationEntry( - "phone#4c410523#20190502", - [ - SetCell("stats_summary", "connected_cell", 1, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "connected_wifi", 1, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "os_build", "PQ2A.190405.004", - _microseconds_from_datetime(timestamp)), - SetCell("cell_plan", "data_plan_05gb", "true", - _microseconds_from_datetime(timestamp)), - ] - )) - await batcher.append( - RowMutationEntry( - "phone#4c410523#20190505", - [ - 
SetCell("stats_summary", "connected_cell", 0, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "connected_wifi", 1, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "os_build", "PQ2A.190406.000", - _microseconds_from_datetime(timestamp)), - SetCell("cell_plan", "data_plan_05gb", "true", - _microseconds_from_datetime(timestamp)), - ] - ) - ) - await batcher.append( - RowMutationEntry( - "phone#5c10102#20190501", - [ - SetCell("stats_summary", "connected_cell", 1, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "connected_wifi", 1, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "os_build", "PQ2A.190401.002", - _microseconds_from_datetime(timestamp)), - SetCell("cell_plan", "data_plan_10gb", "true", - _microseconds_from_datetime(timestamp)), - ] - )) - await batcher.append( - RowMutationEntry( - "phone#5c10102#20190502", - [ - SetCell("stats_summary", "connected_cell", 1, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "connected_wifi", 0, - _microseconds_from_datetime(timestamp)), - SetCell("stats_summary", "os_build", "PQ2A.190406.000", - _microseconds_from_datetime(timestamp)), - SetCell("cell_plan", "data_plan_10gb", "true", - _microseconds_from_datetime(timestamp)), - ] - )) + from google.cloud.bigtable.data import ( + BigtableDataClientAsync, + RowMutationEntry, + SetCell, + ) + + timestamp = datetime.datetime(2019, 5, 1) + timestamp_minus_hr = timestamp - datetime.timedelta(hours=1) + + async with (BigtableDataClientAsync(project=PROJECT) as client): + async with client.get_table(BIGTABLE_INSTANCE, table_id) as table: + async with table.mutations_batcher() as batcher: + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.003", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "true", + _microseconds_from_datetime(timestamp_minus_hr), + ), + SetCell( + "cell_plan", + "data_plan_01gb", + "false", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190502", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190405.004", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#4c410523#20190505", + [ + SetCell( + "stats_summary", + "connected_cell", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_05gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + 
await batcher.append( + RowMutationEntry( + "phone#5c10102#20190501", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190401.002", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) + await batcher.append( + RowMutationEntry( + "phone#5c10102#20190502", + [ + SetCell( + "stats_summary", + "connected_cell", + 1, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "connected_wifi", + 0, + _microseconds_from_datetime(timestamp), + ), + SetCell( + "stats_summary", + "os_build", + "PQ2A.190406.000", + _microseconds_from_datetime(timestamp), + ), + SetCell( + "cell_plan", + "data_plan_10gb", + "true", + _microseconds_from_datetime(timestamp), + ), + ], + ) + ) def _datetime_to_micros(value: datetime.datetime) -> int: - """Uses the same conversion rules as the old client in""" - if not value.tzinfo: - value = value.replace(tzinfo=datetime.timezone.utc) - # Regardless of what timezone is on the value, convert it to UTC. - value = value.astimezone(datetime.timezone.utc) - # Convert the datetime to a microsecond timestamp. - return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond - return int(dt.timestamp() * 1000 * 1000) + """Uses the same conversion rules as the old client in""" + if not value.tzinfo: + value = value.replace(tzinfo=datetime.timezone.utc) + # Regardless of what timezone is on the value, convert it to UTC. + value = value.astimezone(datetime.timezone.utc) + # Convert the datetime to a microsecond timestamp. 
+    import calendar  # calendar is used below but is never imported at module scope
+    return int(calendar.timegm(value.timetuple()) * 1e6) + value.microsecond
 
 
 @pytest.mark.asyncio
 async def test_filter_limit_row_sample(capsys, table_id):
-  await filter_snippets_async.filter_limit_row_sample(PROJECT,
-                                                      BIGTABLE_INSTANCE,
-                                                      table_id)
+    await filter_snippets_async.filter_limit_row_sample(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )
 
-  out, _ = capsys.readouterr()
-  assert "Reading data for" in out
+    out, _ = capsys.readouterr()
+    assert "Reading data for" in out
 
 
 @pytest.mark.asyncio
 async def test_filter_limit_row_regex(capsys, table_id):
-  await filter_snippets_async.filter_limit_row_regex(PROJECT, BIGTABLE_INSTANCE,
-                                                     table_id)
+    await filter_snippets_async.filter_limit_row_regex(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )
 
-  out, _ = capsys.readouterr()
-  expected = snapshots[inspect.currentframe().f_code.co_name]
-  assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected
 
 
 @pytest.mark.asyncio
 async def test_filter_limit_cells_per_col(capsys, table_id):
-  await filter_snippets_async.filter_limit_cells_per_col(PROJECT,
-                                                         BIGTABLE_INSTANCE,
-                                                         table_id)
+    await filter_snippets_async.filter_limit_cells_per_col(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )
 
-  out, _ = capsys.readouterr()
-  expected = snapshots[inspect.currentframe().f_code.co_name]
-  assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected
 
 
 @pytest.mark.asyncio
 async def test_filter_limit_cells_per_row(capsys, table_id):
-  await filter_snippets_async.filter_limit_cells_per_row(PROJECT,
-                                                         BIGTABLE_INSTANCE,
-                                                         table_id)
+    await filter_snippets_async.filter_limit_cells_per_row(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )
 
-  out, _ = capsys.readouterr()
-  expected = snapshots[inspect.currentframe().f_code.co_name]
-  assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected
 
 
 @pytest.mark.asyncio
 async def test_filter_limit_cells_per_row_offset(capsys, table_id):
-  await filter_snippets_async.filter_limit_cells_per_row_offset(
-    PROJECT, BIGTABLE_INSTANCE, table_id
-  )
+    await filter_snippets_async.filter_limit_cells_per_row_offset(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )
 
-  out, _ = capsys.readouterr()
-  expected = snapshots[inspect.currentframe().f_code.co_name]
-  assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected
 
 
 @pytest.mark.asyncio
 async def test_filter_limit_col_family_regex(capsys, table_id):
-  await filter_snippets_async.filter_limit_col_family_regex(PROJECT,
-                                                            BIGTABLE_INSTANCE,
-                                                            table_id)
+    await filter_snippets_async.filter_limit_col_family_regex(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )
 
-  out, _ = capsys.readouterr()
-  expected = snapshots[inspect.currentframe().f_code.co_name]
-  assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected
 
 
 @pytest.mark.asyncio
 async def test_filter_limit_col_qualifier_regex(capsys, table_id):
-  await filter_snippets_async.filter_limit_col_qualifier_regex(
-    PROJECT, BIGTABLE_INSTANCE, table_id
-  )
+    await filter_snippets_async.filter_limit_col_qualifier_regex(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )
 
-  out, _ = capsys.readouterr()
-  expected = snapshots[inspect.currentframe().f_code.co_name]
-  assert out == expected
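Each of these tests looks up its expected output by its own function name: inspect.currentframe().f_code.co_name evaluates, at run time, to the name of the enclosing test function, which doubles as the key into the snapshots dict. A minimal sketch of the idiom, with a hypothetical one-entry snapshot dict standing in for snap_filters_test.snapshots:

    import inspect

    snapshots = {"test_demo": "Reading data for phone#4c410523#20190501:\n"}

    def test_demo():
        # co_name is the enclosing function's name, so each test can fetch
        # its own expected value without repeating the key as a literal.
        expected = snapshots[inspect.currentframe().f_code.co_name]
        assert expected.startswith("Reading data for")

+    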
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_limit_col_range(capsys, table_id):
-    await filter_snippets_async.filter_limit_col_range(PROJECT, BIGTABLE_INSTANCE,
-                                                       table_id)
+    await filter_snippets_async.filter_limit_col_range(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_limit_value_range(capsys, table_id):
-    await filter_snippets_async.filter_limit_value_range(PROJECT,
-                                                         BIGTABLE_INSTANCE,
-                                                         table_id)
+    await filter_snippets_async.filter_limit_value_range(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_limit_value_regex(capsys, table_id):
-    await filter_snippets_async.filter_limit_value_regex(PROJECT,
-                                                         BIGTABLE_INSTANCE,
-                                                         table_id)
+    await filter_snippets_async.filter_limit_value_regex(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_limit_timestamp_range(capsys, table_id):
-    await filter_snippets_async.filter_limit_timestamp_range(PROJECT,
-                                                             BIGTABLE_INSTANCE,
-                                                             table_id)
+    await filter_snippets_async.filter_limit_timestamp_range(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_limit_block_all(capsys, table_id):
-    await filter_snippets_async.filter_limit_block_all(PROJECT, BIGTABLE_INSTANCE,
-                                                       table_id)
+    await filter_snippets_async.filter_limit_block_all(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_limit_pass_all(capsys, table_id):
-    await filter_snippets_async.filter_limit_pass_all(PROJECT, BIGTABLE_INSTANCE,
-                                                      table_id)
+    await filter_snippets_async.filter_limit_pass_all(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_modify_strip_value(capsys, table_id):
-    await filter_snippets_async.filter_modify_strip_value(PROJECT,
-                                                          BIGTABLE_INSTANCE,
-                                                          table_id)
+    await filter_snippets_async.filter_modify_strip_value(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
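+    # StripValueTransformerFilter empties each cell value, so the snapshot
+    # should show row keys and timestamps with blank values.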
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_modify_apply_label(capsys, table_id):
-    await filter_snippets_async.filter_modify_apply_label(PROJECT,
-                                                          BIGTABLE_INSTANCE,
-                                                          table_id)
+    await filter_snippets_async.filter_modify_apply_label(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_composing_chain(capsys, table_id):
-    await filter_snippets_async.filter_composing_chain(PROJECT, BIGTABLE_INSTANCE,
-                                                       table_id)
+    await filter_snippets_async.filter_composing_chain(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_composing_interleave(capsys, table_id):
-    await filter_snippets_async.filter_composing_interleave(PROJECT,
-                                                            BIGTABLE_INSTANCE,
-                                                            table_id)
+    await filter_snippets_async.filter_composing_interleave(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected


 @pytest.mark.asyncio
 async def test_filter_composing_condition(capsys, table_id):
-    await filter_snippets_async.filter_composing_condition(PROJECT,
-                                                           BIGTABLE_INSTANCE,
-                                                           table_id)
+    await filter_snippets_async.filter_composing_condition(
+        PROJECT, BIGTABLE_INSTANCE, table_id
+    )

-    out, _ = capsys.readouterr()
-    expected = snapshots[inspect.currentframe().f_code.co_name]
-    assert out == expected
+    out, _ = capsys.readouterr()
+    expected = snapshots[inspect.currentframe().f_code.co_name]
+    assert out == expected

From c7ac97508afff645740f154357dbf35b2644a96a Mon Sep 17 00:00:00 2001
From: Igor Berntein
Date: Thu, 23 May 2024 15:59:19 -0400
Subject: [PATCH 3/3] suffix snippets

---
 .../snippets/filters/filter_snippets_async.py | 72 +++++++++----------
 1 file changed, 36 insertions(+), 36 deletions(-)

diff --git a/samples/snippets/filters/filter_snippets_async.py b/samples/snippets/filters/filter_snippets_async.py
index aec4b22cd..72dac824d 100644
--- a/samples/snippets/filters/filter_snippets_async.py
+++ b/samples/snippets/filters/filter_snippets_async.py
@@ -16,7 +16,7 @@
 from google.cloud._helpers import _datetime_from_microseconds


-# [START bigtable_filters_limit_row_sample]
+# [START bigtable_filters_limit_row_sample_asyncio]
 async def filter_limit_row_sample(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -29,8 +29,8 @@ async def filter_limit_row_sample(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_row_sample]
-# [START bigtable_filters_limit_row_regex]
+# [END bigtable_filters_limit_row_sample_asyncio]
+# [START bigtable_filters_limit_row_regex_asyncio]
 async def filter_limit_row_regex(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -45,8 +45,8 @@ async def filter_limit_row_regex(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_row_regex]
-# [START bigtable_filters_limit_cells_per_col]
+# [END bigtable_filters_limit_row_regex_asyncio]
+# [START bigtable_filters_limit_cells_per_col_asyncio]
 async def filter_limit_cells_per_col(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -59,8 +59,8 @@ async def filter_limit_cells_per_col(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_cells_per_col]
-# [START bigtable_filters_limit_cells_per_row]
+# [END bigtable_filters_limit_cells_per_col_asyncio]
+# [START bigtable_filters_limit_cells_per_row_asyncio]
 async def filter_limit_cells_per_row(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -73,8 +73,8 @@ async def filter_limit_cells_per_row(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_cells_per_row]
-# [START bigtable_filters_limit_cells_per_row_offset]
+# [END bigtable_filters_limit_cells_per_row_asyncio]
+# [START bigtable_filters_limit_cells_per_row_offset_asyncio]
 async def filter_limit_cells_per_row_offset(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -87,8 +87,8 @@ async def filter_limit_cells_per_row_offset(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_cells_per_row_offset]
-# [START bigtable_filters_limit_col_family_regex]
+# [END bigtable_filters_limit_cells_per_row_offset_asyncio]
+# [START bigtable_filters_limit_col_family_regex_asyncio]
 async def filter_limit_col_family_regex(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -103,8 +103,8 @@ async def filter_limit_col_family_regex(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_col_family_regex]
-# [START bigtable_filters_limit_col_qualifier_regex]
+# [END bigtable_filters_limit_col_family_regex_asyncio]
+# [START bigtable_filters_limit_col_qualifier_regex_asyncio]
 async def filter_limit_col_qualifier_regex(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -121,8 +121,8 @@ async def filter_limit_col_qualifier_regex(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_col_qualifier_regex]
-# [START bigtable_filters_limit_col_range]
+# [END bigtable_filters_limit_col_qualifier_regex_asyncio]
+# [START bigtable_filters_limit_col_range_asyncio]
 async def filter_limit_col_range(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -139,8 +139,8 @@ async def filter_limit_col_range(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_col_range]
-# [START bigtable_filters_limit_value_range]
+# [END bigtable_filters_limit_col_range_asyncio]
+# [START bigtable_filters_limit_value_range_asyncio]
 async def filter_limit_value_range(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -155,8 +155,8 @@ async def filter_limit_value_range(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_value_range]
-# [START bigtable_filters_limit_value_regex]
+# [END bigtable_filters_limit_value_range_asyncio]
+# [START bigtable_filters_limit_value_regex_asyncio]


 async def filter_limit_value_regex(project_id, instance_id, table_id):
@@ -173,8 +173,8 @@ async def filter_limit_value_regex(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_value_regex]
-# [START bigtable_filters_limit_timestamp_range]
+# [END bigtable_filters_limit_value_regex_asyncio]
+# [START bigtable_filters_limit_timestamp_range_asyncio]
 async def filter_limit_timestamp_range(project_id, instance_id, table_id):
     import datetime
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
@@ -190,8 +190,8 @@ async def filter_limit_timestamp_range(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_timestamp_range]
-# [START bigtable_filters_limit_block_all]
+# [END bigtable_filters_limit_timestamp_range_asyncio]
+# [START bigtable_filters_limit_block_all_asyncio]
 async def filter_limit_block_all(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -204,8 +204,8 @@ async def filter_limit_block_all(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_block_all]
-# [START bigtable_filters_limit_pass_all]
+# [END bigtable_filters_limit_block_all_asyncio]
+# [START bigtable_filters_limit_pass_all_asyncio]
 async def filter_limit_pass_all(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -218,8 +218,8 @@ async def filter_limit_pass_all(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_limit_pass_all]
-# [START bigtable_filters_modify_strip_value]
+# [END bigtable_filters_limit_pass_all_asyncio]
+# [START bigtable_filters_modify_strip_value_asyncio]
 async def filter_modify_strip_value(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -232,8 +232,8 @@ async def filter_modify_strip_value(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_modify_strip_value]
-# [START bigtable_filters_modify_apply_label]
+# [END bigtable_filters_modify_strip_value_asyncio]
+# [START bigtable_filters_modify_apply_label_asyncio]
 async def filter_modify_apply_label(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -246,8 +246,8 @@ async def filter_modify_apply_label(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_modify_apply_label]
-# [START bigtable_filters_composing_chain]
+# [END bigtable_filters_modify_apply_label_asyncio]
+# [START bigtable_filters_composing_chain_asyncio]
 async def filter_composing_chain(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -267,8 +267,8 @@ async def filter_composing_chain(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_composing_chain]
-# [START bigtable_filters_composing_interleave]
+# [END bigtable_filters_composing_chain_asyncio]
+# [START bigtable_filters_composing_interleave_asyncio]
 async def filter_composing_interleave(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -288,8 +288,8 @@ async def filter_composing_interleave(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_composing_interleave]
-# [START bigtable_filters_composing_condition]
+# [END bigtable_filters_composing_interleave_asyncio]
+# [START bigtable_filters_composing_condition_asyncio]
 async def filter_composing_condition(project_id, instance_id, table_id):
     from google.cloud.bigtable.data import BigtableDataClientAsync, ReadRowsQuery
     from google.cloud.bigtable.data import row_filters
@@ -313,7 +313,7 @@ async def filter_composing_condition(project_id, instance_id, table_id):
             print_row(row)


-# [END bigtable_filters_composing_condition]
+# [END bigtable_filters_composing_condition_asyncio]
 # [END_EXCLUDE]
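
Usage note: each snippet in this patch is a coroutine, so callers need an event
loop to drive it. A minimal driver sketch follows; the project, instance, and
table IDs are placeholders, not values taken from this patch:

    import asyncio

    from filter_snippets_async import filter_limit_row_sample

    # Placeholder identifiers -- substitute a real project, instance and table.
    asyncio.run(
        filter_limit_row_sample("my-project", "my-instance", "mobile-time-series")
    )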