feat: Add support for Cloud Bigtable Node Scaling Factor for CBT Clusters #1023

Merged · 6 commits · Sep 25, 2024
59 changes: 59 additions & 0 deletions google/cloud/bigtable_admin_v2/types/instance.py
@@ -237,6 +237,9 @@ class Cluster(proto.Message):
The number of nodes allocated to this
cluster. More nodes enable higher throughput and
more consistent performance.
node_scaling_factor (google.cloud.bigtable_admin_v2.types.Cluster.NodeScalingFactor):
Immutable. The node scaling factor of this
cluster.
cluster_config (google.cloud.bigtable_admin_v2.types.Cluster.ClusterConfig):
Configuration for this cluster.

@@ -284,6 +287,28 @@ class State(proto.Enum):
RESIZING = 3
DISABLED = 4

class NodeScalingFactor(proto.Enum):
r"""Possible node scaling factors of the clusters. Node scaling
delivers better latency and more throughput by removing node
boundaries.

Values:
NODE_SCALING_FACTOR_UNSPECIFIED (0):
No node scaling specified. Defaults to
NODE_SCALING_FACTOR_1X.
NODE_SCALING_FACTOR_1X (1):
The cluster is running with a scaling factor
of 1.
NODE_SCALING_FACTOR_2X (2):
The cluster is running with a scaling factor of 2. All node
count values must be in increments of 2 with this scaling
factor enabled, otherwise an INVALID_ARGUMENT error will be
returned.
"""
NODE_SCALING_FACTOR_UNSPECIFIED = 0
NODE_SCALING_FACTOR_1X = 1
NODE_SCALING_FACTOR_2X = 2

class ClusterAutoscalingConfig(proto.Message):
r"""Autoscaling config for a cluster.

@@ -364,6 +389,11 @@ class EncryptionConfig(proto.Message):
proto.INT32,
number=4,
)
node_scaling_factor: NodeScalingFactor = proto.Field(
proto.ENUM,
number=9,
enum=NodeScalingFactor,
)
cluster_config: ClusterConfig = proto.Field(
proto.MESSAGE,
number=7,
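
For orientation, a minimal usage sketch of the new field (not part of this diff): it assumes the BigtableInstanceAdminClient and message types exported by google.cloud.bigtable_admin_v2, and the project, instance, zone, and cluster IDs are placeholders.

# Hedged sketch: create a cluster with the new node_scaling_factor field.
# Resource names below are placeholders, not values from this PR.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()

cluster = bigtable_admin_v2.types.Cluster(
    location="projects/my-project/locations/us-central1-b",
    # With 2X scaling, serve_nodes must be a multiple of 2; otherwise the
    # service returns INVALID_ARGUMENT (per the enum docstring above).
    serve_nodes=4,
    node_scaling_factor=(
        bigtable_admin_v2.types.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_2X
    ),
    default_storage_type=bigtable_admin_v2.types.StorageType.SSD,
)

# create_cluster returns a long-running operation; result() waits for it.
operation = client.create_cluster(
    parent="projects/my-project/instances/my-instance",
    cluster_id="my-cluster",
    cluster=cluster,
)
print(operation.result(timeout=300))

Note that the field is immutable, so the scaling factor is chosen at cluster creation time rather than via update_cluster.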
@@ -468,18 +498,47 @@ class MultiClusterRoutingUseAny(proto.Message):
in a region are considered equidistant. Choosing this option
sacrifices read-your-writes consistency to improve availability.


.. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

Attributes:
cluster_ids (MutableSequence[str]):
The set of clusters to route to. The order is
ignored; clusters will be tried in order of
distance. If left empty, all clusters are
eligible.
row_affinity (google.cloud.bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny.RowAffinity):
Row affinity sticky routing based on the row
key of the request. Requests that span multiple
rows are routed non-deterministically.

This field is a member of `oneof`_ ``affinity``.
"""

class RowAffinity(proto.Message):
r"""If enabled, Bigtable will route the request based on the row
key of the request, rather than randomly. Instead, each row key
will be assigned to a cluster, and will stick to that cluster.
If clusters are added or removed, then this may affect which row
keys stick to which clusters. To avoid this, users can use a
cluster group to specify which clusters are to be used. In this
case, new clusters that are not a part of the cluster group will
not be routed to, and routing will be unaffected by the new
cluster. Moreover, clusters specified in the cluster group
cannot be deleted unless removed from the cluster group.

"""

cluster_ids: MutableSequence[str] = proto.RepeatedField(
proto.STRING,
number=1,
)
row_affinity: "AppProfile.MultiClusterRoutingUseAny.RowAffinity" = proto.Field(
proto.MESSAGE,
number=3,
oneof="affinity",
message="AppProfile.MultiClusterRoutingUseAny.RowAffinity",
)

class SingleClusterRouting(proto.Message):
r"""Unconditionally routes all read/write requests to a specific
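The row_affinity oneof added above can be opted into on an app profile. A hedged sketch, not part of this diff, with placeholder project, instance, and cluster names:

# Hedged sketch: create an app profile that uses row-affinity routing.
from google.cloud import bigtable_admin_v2

client = bigtable_admin_v2.BigtableInstanceAdminClient()

routing = bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny(
    cluster_ids=["cluster-a", "cluster-b"],
    # Setting the oneof member opts into sticky, row-key-based routing.
    row_affinity=bigtable_admin_v2.types.AppProfile.MultiClusterRoutingUseAny.RowAffinity(),
)

app_profile = bigtable_admin_v2.types.AppProfile(
    description="multi-cluster routing with row affinity",
    multi_cluster_routing_use_any=routing,
)

profile = client.create_app_profile(
    parent="projects/my-project/instances/my-instance",
    app_profile_id="row-affinity-profile",
    app_profile=app_profile,
)
print(profile.name)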
2 changes: 1 addition & 1 deletion scripts/fixup_bigtable_admin_v2_keywords.py
@@ -84,7 +84,7 @@ class bigtable_adminCallTransformer(cst.CSTTransformer):
'update_app_profile': ('app_profile', 'update_mask', 'ignore_warnings', ),
'update_authorized_view': ('authorized_view', 'update_mask', 'ignore_warnings', ),
'update_backup': ('backup', 'update_mask', ),
'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'cluster_config', 'default_storage_type', 'encryption_config', ),
'update_cluster': ('name', 'location', 'state', 'serve_nodes', 'node_scaling_factor', 'cluster_config', 'default_storage_type', 'encryption_config', ),
'update_instance': ('display_name', 'name', 'state', 'type_', 'labels', 'create_time', 'satisfies_pzs', ),
'update_table': ('table', 'update_mask', ),
}
@@ -3837,6 +3837,7 @@ def test_get_cluster(request_type, transport: str = "grpc"):
location="location_value",
state=instance.Cluster.State.READY,
serve_nodes=1181,
node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X,
default_storage_type=common.StorageType.SSD,
)
response = client.get_cluster(request)
@@ -3853,6 +3854,10 @@ def test_get_cluster(request_type, transport: str = "grpc"):
assert response.location == "location_value"
assert response.state == instance.Cluster.State.READY
assert response.serve_nodes == 1181
assert (
response.node_scaling_factor
== instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X
)
assert response.default_storage_type == common.StorageType.SSD


@@ -3956,6 +3961,7 @@ async def test_get_cluster_empty_call_async():
location="location_value",
state=instance.Cluster.State.READY,
serve_nodes=1181,
node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X,
default_storage_type=common.StorageType.SSD,
)
)
@@ -4030,6 +4036,7 @@ async def test_get_cluster_async(
location="location_value",
state=instance.Cluster.State.READY,
serve_nodes=1181,
node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X,
default_storage_type=common.StorageType.SSD,
)
)
@@ -4047,6 +4054,10 @@ async def test_get_cluster_async(
assert response.location == "location_value"
assert response.state == instance.Cluster.State.READY
assert response.serve_nodes == 1181
assert (
response.node_scaling_factor
== instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X
)
assert response.default_storage_type == common.StorageType.SSD


@@ -11381,6 +11392,7 @@ def test_create_cluster_rest(request_type):
"location": "location_value",
"state": 1,
"serve_nodes": 1181,
"node_scaling_factor": 1,
"cluster_config": {
"cluster_autoscaling_config": {
"autoscaling_limits": {
@@ -11800,6 +11812,7 @@ def test_get_cluster_rest(request_type):
location="location_value",
state=instance.Cluster.State.READY,
serve_nodes=1181,
node_scaling_factor=instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X,
default_storage_type=common.StorageType.SSD,
)

@@ -11820,6 +11833,10 @@ def test_get_cluster_rest(request_type):
assert response.location == "location_value"
assert response.state == instance.Cluster.State.READY
assert response.serve_nodes == 1181
assert (
response.node_scaling_factor
== instance.Cluster.NodeScalingFactor.NODE_SCALING_FACTOR_1X
)
assert response.default_storage_type == common.StorageType.SSD


@@ -12577,6 +12594,7 @@ def test_partial_update_cluster_rest(request_type):
"location": "location_value",
"state": 1,
"serve_nodes": 1181,
"node_scaling_factor": 1,
"cluster_config": {
"cluster_autoscaling_config": {
"autoscaling_limits": {
@@ -13267,7 +13285,8 @@ def test_create_app_profile_rest(request_type):
"etag": "etag_value",
"description": "description_value",
"multi_cluster_routing_use_any": {
"cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"]
"cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"],
"row_affinity": {},
},
"single_cluster_routing": {
"cluster_id": "cluster_id_value",
@@ -14396,7 +14415,8 @@ def test_update_app_profile_rest(request_type):
"etag": "etag_value",
"description": "description_value",
"multi_cluster_routing_use_any": {
"cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"]
"cluster_ids": ["cluster_ids_value1", "cluster_ids_value2"],
"row_affinity": {},
},
"single_cluster_routing": {
"cluster_id": "cluster_id_value",