diff --git a/.gitignore b/.gitignore
index 53a8785d3..1d330b55a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,6 +35,10 @@ __debug_bin
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
+# Go workspace file
+go.work
+go.work.sum
+
# Dependency directories
vendor/
packrd/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index c93a29042..62b559322 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,16 @@ nav_order: 1
+
+## [MAJOR.MINOR.PATCH] - YYYY-MM-DD
+
+- Add `aiven_kafka_quota` resource
+- Add `aiven_opensearch` resource and datasource field
+ `opensearch_user_config.opensearch.cluster_routing_allocation_balance_prefer_primary`: When set to true, OpenSearch
+ attempts to evenly distribute the primary shards between the cluster nodes
+- Add `aiven_opensearch` resource and datasource field `opensearch_user_config.opensearch.segrep`: Segment Replication
+ Backpressure Settings
+
## [4.31.1] - 2024-12-23
- Validate whether the `aiven_project.billing_group` field has changed before calling admin API
diff --git a/Makefile b/Makefile
index e6eb454ac..a81334d07 100644
--- a/Makefile
+++ b/Makefile
@@ -112,10 +112,12 @@ lint: lint-go lint-test lint-docs
lint-go: $(GOLANGCILINT)
$(GOLANGCILINT) run --build-tags all --timeout=30m ./...
+# Exclude files that use templates from linting
+TERRAFMT_EXCLUDE = -not -path "./internal/acctest/*" \
+ -not -path "./internal/sdkprovider/service/kafka/kafka_quota_test.go"
lint-test: $(TERRAFMT)
- $(TERRAFMT) diff ./internal -cfq
-
+ find ./internal -type f $(TERRAFMT_EXCLUDE) -exec $(TERRAFMT) diff {} -cfq \;
lint-docs: $(TFPLUGINDOCS)
PROVIDER_AIVEN_ENABLE_BETA=1 $(TFPLUGINDOCS) generate --rendered-website-dir tmp
@@ -132,7 +134,6 @@ lint-docs: $(TFPLUGINDOCS)
fmt: fmt-test fmt-imports
-
fmt-test: $(TERRAFMT)
$(TERRAFMT) fmt ./internal -fv
diff --git a/docs/data-sources/account_team_project.md b/docs/data-sources/account_team_project.md
index 4dcc54d51..626955fda 100644
--- a/docs/data-sources/account_team_project.md
+++ b/docs/data-sources/account_team_project.md
@@ -32,4 +32,4 @@ data "aiven_account_team_project" "account_team_project1" {
### Read-Only
- `id` (String) The ID of this resource.
-- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `service:logs:read`, `project:services:read`, `project:services:write`, `project:audit_logs:read`, `service:data:write`, `service:secrets:read`, `service:users:write`, `role:services:maintenance`, `role:services:recover`, `organization:audit_logs:read`, `organization:users:write`, `organization:app_users:write`, `organization:groups:write`, `organization:idps:write`, `organization:domains:write` and `role:organization:admin`.
+- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `service:logs:read`, `project:services:read`, `project:services:write`, `project:audit_logs:read`, `service:data:write`, `service:secrets:read`, `service:users:write`, `role:services:maintenance`, `role:services:recover`, `organization:audit_logs:read`, `organization:projects:write`, `organization:users:write`, `organization:app_users:write`, `organization:groups:write`, `organization:idps:write`, `organization:domains:write` and `role:organization:admin`.
diff --git a/docs/data-sources/opensearch.md b/docs/data-sources/opensearch.md
index 690426c4e..b53bbf61c 100644
--- a/docs/data-sources/opensearch.md
+++ b/docs/data-sources/opensearch.md
@@ -221,6 +221,7 @@ Read-Only:
- `action_destructive_requires_name` (Boolean)
- `auth_failure_listeners` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--opensearch--auth_failure_listeners))
- `cluster_max_shards_per_node` (Number)
+- `cluster_routing_allocation_balance_prefer_primary` (Boolean)
- `cluster_routing_allocation_node_concurrent_recoveries` (Number)
- `email_sender_name` (String)
- `email_sender_password` (String)
@@ -252,6 +253,7 @@ Read-Only:
- `search_backpressure` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--opensearch--search_backpressure))
- `search_insights_top_queries` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--opensearch--search_insights_top_queries))
- `search_max_buckets` (Number)
+- `segrep` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--opensearch--segrep))
- `shard_indexing_pressure` (List of Object) (see [below for nested schema](#nestedobjatt--opensearch_user_config--opensearch--shard_indexing_pressure))
- `thread_pool_analyze_queue_size` (Number)
- `thread_pool_analyze_size` (Number)
@@ -394,6 +396,17 @@ Read-Only:
+
+### Nested Schema for `opensearch_user_config.opensearch.segrep`
+
+Read-Only:
+
+- `pressure_checkpoint_limit` (Number)
+- `pressure_enabled` (Boolean)
+- `pressure_replica_stale_limit` (Number)
+- `pressure_time_limit` (String)
+
+
### Nested Schema for `opensearch_user_config.opensearch.shard_indexing_pressure`
diff --git a/docs/data-sources/project_user.md b/docs/data-sources/project_user.md
index 72e44b36b..d983a6273 100644
--- a/docs/data-sources/project_user.md
+++ b/docs/data-sources/project_user.md
@@ -31,4 +31,4 @@ data "aiven_project_user" "mytestuser" {
- `accepted` (Boolean) Whether the user has accepted the request to join the project. Users get an invite and become project members after accepting the invite.
- `id` (String) The ID of this resource.
-- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
+- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
diff --git a/docs/resources/account_team_project.md b/docs/resources/account_team_project.md
index b321e822e..6d2a3f992 100644
--- a/docs/resources/account_team_project.md
+++ b/docs/resources/account_team_project.md
@@ -48,7 +48,7 @@ resource "aiven_account_team_project" "main" {
### Optional
- `project_name` (String) The name of an already existing project
-- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `service:logs:read`, `project:services:read`, `project:services:write`, `project:audit_logs:read`, `service:data:write`, `service:secrets:read`, `service:users:write`, `role:services:maintenance`, `role:services:recover`, `organization:audit_logs:read`, `organization:users:write`, `organization:app_users:write`, `organization:groups:write`, `organization:idps:write`, `organization:domains:write` and `role:organization:admin`.
+- `team_type` (String) The Account team project type. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `service:logs:read`, `project:services:read`, `project:services:write`, `project:audit_logs:read`, `service:data:write`, `service:secrets:read`, `service:users:write`, `role:services:maintenance`, `role:services:recover`, `organization:audit_logs:read`, `organization:projects:write`, `organization:users:write`, `organization:app_users:write`, `organization:groups:write`, `organization:idps:write`, `organization:domains:write` and `role:organization:admin`.
- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
### Read-Only
diff --git a/docs/resources/kafka_quota.md b/docs/resources/kafka_quota.md
new file mode 100644
index 000000000..03d4b3632
--- /dev/null
+++ b/docs/resources/kafka_quota.md
@@ -0,0 +1,84 @@
+---
+# generated by https://github.com/hashicorp/terraform-plugin-docs
+page_title: "aiven_kafka_quota Resource - terraform-provider-aiven"
+subcategory: ""
+description: |-
+ Creates and manages quotas for an Aiven for Apache Kafka® service user.
+---
+
+# aiven_kafka_quota (Resource)
+
+Creates and manages quotas for an Aiven for Apache Kafka® service user.
+
+## Example Usage
+
+```terraform
+resource "aiven_kafka_quota" "example_quota" {
+ project = data.aiven_project.foo.project
+ service_name = aiven_kafka.example_kafka.service_name
+ user = "example-kafka-user"
+ client_id = "example_client"
+ consumer_byte_rate = 1000
+ producer_byte_rate = 1000
+ request_percentage = 50
+}
+```
+
+
+## Schema
+
+### Required
+
+- `project` (String) The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
+- `service_name` (String) The name of the service that this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
+
+### Optional
+
+- `client_id` (String) Represents a logical group of clients, assigned a unique name by the client application.
+Quotas can be applied based on user, client-id, or both.
+The most relevant quota is chosen for each connection.
+All connections within a quota group share the same quota.
+It is possible to set default quotas for each (user, client-id) pair, user, or client-id group by specifying 'default'.
+- `consumer_byte_rate` (Number) Defines the bandwidth limit in bytes/sec for each group of clients sharing a quota.
+Every distinct client group is allocated a specific quota, as defined by the cluster, on a per-broker basis.
+Exceeding this limit results in client throttling.
+- `producer_byte_rate` (Number) Defines the bandwidth limit in bytes/sec for each group of clients sharing a quota.
+Every distinct client group is allocated a specific quota, as defined by the cluster, on a per-broker basis.
+Exceeding this limit results in client throttling.
+- `request_percentage` (Number) Sets the maximum percentage of CPU time that a client group can use on request handler I/O and network threads per broker within a quota window.
+Exceeding this limit triggers throttling.
+The quota, expressed as a percentage, also indicates the total allowable CPU usage for the client groups sharing the quota.
+- `timeouts` (Block, Optional) (see [below for nested schema](#nestedblock--timeouts))
+- `user` (String) Represents a logical group of clients, assigned a unique name by the client application.
+Quotas can be applied based on user, client-id, or both.
+The most relevant quota is chosen for each connection.
+All connections within a quota group share the same quota.
+It is possible to set default quotas for each (user, client-id) pair, user, or client-id group by specifying 'default'.
+
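+For example, a minimal sketch of a default quota that applies to any user without a more specific quota (the values are illustrative):
+
+```terraform
+resource "aiven_kafka_quota" "default_user_quota" {
+  project            = data.aiven_project.foo.project
+  service_name       = aiven_kafka.example_kafka.service_name
+  user               = "default"
+  consumer_byte_rate = 2000
+}
+```
+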
+### Read-Only
+
+- `id` (String) The ID of this resource.
+
+
+### Nested Schema for `timeouts`
+
+Optional:
+
+- `create` (String)
+- `default` (String)
+- `delete` (String)
+- `read` (String)
+- `update` (String)
+
+## Import
+
+Import is supported using the following syntax:
+
+```shell
+# When both USER and CLIENT_ID are specified
+terraform import aiven_kafka_quota.example_quota PROJECT/SERVICE_NAME/CLIENT_ID/USER
+# When only USER is specified
+terraform import aiven_kafka_quota.example_quota PROJECT/SERVICE_NAME//USER
+# When only CLIENT_ID is specified
+terraform import aiven_kafka_quota.example_quota PROJECT/SERVICE_NAME/CLIENT_ID/
+```
diff --git a/docs/resources/opensearch.md b/docs/resources/opensearch.md
index 2ef982315..7a8614a7c 100644
--- a/docs/resources/opensearch.md
+++ b/docs/resources/opensearch.md
@@ -246,6 +246,7 @@ Optional:
- `action_destructive_requires_name` (Boolean) Require explicit index names when deleting.
- `auth_failure_listeners` (Block List, Max: 1) Opensearch Security Plugin Settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--auth_failure_listeners))
- `cluster_max_shards_per_node` (Number) Controls the number of shards allowed in the cluster per data node. Example: `1000`.
+- `cluster_routing_allocation_balance_prefer_primary` (Boolean) When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false. Default: `false`.
- `cluster_routing_allocation_node_concurrent_recoveries` (Number) How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to node cpu count * 2.
- `email_sender_name` (String) Sender name placeholder to be used in Opensearch Dashboards and Opensearch keystore. Example: `alert-sender`.
- `email_sender_password` (String, Sensitive) Sender password for Opensearch alerts to authenticate with SMTP server. Example: `very-secure-mail-password`.
@@ -277,6 +278,7 @@ Optional:
- `search_backpressure` (Block List, Max: 1) Search Backpressure Settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--search_backpressure))
- `search_insights_top_queries` (Block List, Max: 1) (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--search_insights_top_queries))
- `search_max_buckets` (Number) Maximum number of aggregation buckets allowed in a single response. OpenSearch default value is used when this is not defined. Example: `10000`.
+- `segrep` (Block List, Max: 1) Segment Replication Backpressure Settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--segrep))
- `shard_indexing_pressure` (Block List, Max: 1) Shard indexing back pressure settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch--shard_indexing_pressure))
- `thread_pool_analyze_queue_size` (Number) Size for the thread pool queue. See documentation for exact details.
- `thread_pool_analyze_size` (Number) Size for the thread pool. See documentation for exact details. Do note this may have maximum value depending on CPU count - value is automatically lowered if set to higher than maximum value.
@@ -419,6 +421,17 @@ Optional:
+
+### Nested Schema for `opensearch_user_config.opensearch.segrep`
+
+Optional:
+
+- `pressure_checkpoint_limit` (Number) The maximum number of indexing checkpoints that a replica shard can fall behind when copying from primary. Once `segrep.pressure.checkpoint.limit` is breached along with `segrep.pressure.time.limit`, the segment replication backpressure mechanism is initiated. Default is 4 checkpoints. Default: `4`.
+- `pressure_enabled` (Boolean) Enables the segment replication backpressure mechanism. Default is false. Default: `false`.
+- `pressure_replica_stale_limit` (Number) The maximum number of stale replica shards that can exist in a replication group. Once `segrep.pressure.replica.stale.limit` is breached, the segment replication backpressure mechanism is initiated. Default is .5, which is 50% of a replication group. Default: `0.5`.
+- `pressure_time_limit` (String) The maximum amount of time that a replica shard can take to copy from the primary shard. Once `segrep.pressure.time.limit` is breached along with `segrep.pressure.checkpoint.limit`, the segment replication backpressure mechanism is initiated. Default is 5 minutes. Default: `5m`.
+
+
### Nested Schema for `opensearch_user_config.opensearch.shard_indexing_pressure`
diff --git a/docs/resources/organization_group_project.md b/docs/resources/organization_group_project.md
index 8b817cd87..3ad480c17 100644
--- a/docs/resources/organization_group_project.md
+++ b/docs/resources/organization_group_project.md
@@ -51,7 +51,7 @@ resource "aiven_organization_group_project" "example" {
- `group_id` (String) The ID of the user group.
- `project` (String) The project that the users in the group are members of.
-- `role` (String) [Project-level role](https://aiven.io/docs/platform/reference/project-member-privileges) assigned to all users in the group. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `service:logs:read`, `project:services:read`, `project:services:write`, `project:audit_logs:read`, `service:data:write`, `service:secrets:read`, `service:users:write`, `role:services:maintenance`, `role:services:recover`, `organization:audit_logs:read`, `organization:users:write`, `organization:app_users:write`, `organization:groups:write`, `organization:idps:write`, `organization:domains:write` and `role:organization:admin`.
+- `role` (String) [Project-level role](https://aiven.io/docs/platform/reference/project-member-privileges) assigned to all users in the group. The possible values are `admin`, `operator`, `developer`, `read_only`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `service:configuration:write`, `service:logs:read`, `project:services:read`, `project:services:write`, `project:audit_logs:read`, `service:data:write`, `service:secrets:read`, `service:users:write`, `role:services:maintenance`, `role:services:recover`, `organization:audit_logs:read`, `organization:projects:write`, `organization:users:write`, `organization:app_users:write`, `organization:groups:write`, `organization:idps:write`, `organization:domains:write` and `role:organization:admin`.
### Optional
diff --git a/docs/resources/organization_permission.md b/docs/resources/organization_permission.md
index 222ab9a70..8e87d5938 100644
--- a/docs/resources/organization_permission.md
+++ b/docs/resources/organization_permission.md
@@ -95,7 +95,7 @@ resource "aiven_organization_permission" "example_org_permissions" {
Required:
-- `permissions` (Set of String) List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
+- `permissions` (Set of String) List of [roles and permissions](https://aiven.io/docs/platform/concepts/permissions) to grant. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
- `principal_id` (String) ID of the user or group to grant permissions to. Only active users who have accepted an [invite](https://aiven.io/docs/platform/howto/manage-org-users) to join the organization can be granted permissions.
- `principal_type` (String) The type of principal. The possible values are `user` and `user_group`.
diff --git a/docs/resources/project_user.md b/docs/resources/project_user.md
index 34b6bdae4..0333c11b9 100644
--- a/docs/resources/project_user.md
+++ b/docs/resources/project_user.md
@@ -33,7 +33,7 @@ resource "aiven_project_user" "mytestuser" {
### Required
- `email` (String) Email address of the user in lowercase. Changing this property forces recreation of the resource.
-- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
+- `member_type` (String) Project membership type. The possible values are `admin`, `developer`, `operator`, `organization:app_users:write`, `organization:audit_logs:read`, `organization:domains:write`, `organization:groups:write`, `organization:idps:write`, `organization:projects:write`, `organization:users:write`, `project:audit_logs:read`, `project:integrations:read`, `project:integrations:write`, `project:networking:read`, `project:networking:write`, `project:permissions:read`, `project:services:read`, `project:services:write`, `read_only`, `role:organization:admin`, `role:services:maintenance`, `role:services:recover`, `service:configuration:write`, `service:data:write`, `service:logs:read`, `service:secrets:read` and `service:users:write`.
- `project` (String) The name of the project this resource belongs to. To set up proper dependencies please refer to this variable as a reference. Changing this property forces recreation of the resource.
### Optional
diff --git a/examples/resources/aiven_kafka_quota/import.sh b/examples/resources/aiven_kafka_quota/import.sh
new file mode 100644
index 000000000..4b8c4bfe5
--- /dev/null
+++ b/examples/resources/aiven_kafka_quota/import.sh
@@ -0,0 +1,6 @@
+# When both USER and CLIENT_ID are specified
+terraform import aiven_kafka_quota.example_quota PROJECT/SERVICE_NAME/CLIENT_ID/USER
+# When only USER is specified
+terraform import aiven_kafka_quota.example_quota PROJECT/SERVICE_NAME//USER
+# When only CLIENT_ID is specified
+terraform import aiven_kafka_quota.example_quota PROJECT/SERVICE_NAME/CLIENT_ID/
diff --git a/examples/resources/aiven_kafka_quota/resource.tf b/examples/resources/aiven_kafka_quota/resource.tf
new file mode 100644
index 000000000..7405045f7
--- /dev/null
+++ b/examples/resources/aiven_kafka_quota/resource.tf
@@ -0,0 +1,9 @@
+resource "aiven_kafka_quota" "example_quota" {
+ project = data.aiven_project.foo.project
+ service_name = aiven_kafka.example_kafka.service_name
+ user = "example-kafka-user"
+ client_id = "example_client"
+ consumer_byte_rate = 1000
+ producer_byte_rate = 1000
+ request_percentage = 50
+}
\ No newline at end of file
diff --git a/go.mod b/go.mod
index b7d5a171c..e5d3970a0 100644
--- a/go.mod
+++ b/go.mod
@@ -4,7 +4,7 @@ go 1.23
require (
github.com/aiven/aiven-go-client/v2 v2.33.0
- github.com/aiven/go-client-codegen v0.74.0
+ github.com/aiven/go-client-codegen v0.76.0
github.com/avast/retry-go v3.0.0+incompatible
github.com/dave/jennifer v1.7.1
github.com/docker/go-units v0.5.0
@@ -47,7 +47,7 @@ require (
require (
github.com/agext/levenshtein v1.2.3 // indirect
- github.com/aiven/go-api-schemas v1.106.0
+ github.com/aiven/go-api-schemas v1.107.0
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fatih/color v1.16.0 // indirect
@@ -65,7 +65,7 @@ require (
github.com/hashicorp/hcl/v2 v2.23.0 // indirect
github.com/hashicorp/logutils v1.0.0 // indirect
github.com/hashicorp/terraform-exec v0.21.0 // indirect
- github.com/hashicorp/terraform-json v0.23.0 // indirect
+ github.com/hashicorp/terraform-json v0.23.0
github.com/hashicorp/terraform-plugin-framework-timeouts v0.4.1
github.com/hashicorp/terraform-plugin-framework-validators v0.16.0
github.com/hashicorp/terraform-plugin-log v0.9.0 // indirect
diff --git a/go.sum b/go.sum
index 4b08e60f6..615d914bc 100644
--- a/go.sum
+++ b/go.sum
@@ -8,10 +8,10 @@ github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7l
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
github.com/aiven/aiven-go-client/v2 v2.33.0 h1:7hsM3/2lVog/P9ls/gLeba5feNVQjK8rIL+lbxD2GB4=
github.com/aiven/aiven-go-client/v2 v2.33.0/go.mod h1:qXBgER0dtjJa1V3l7kzpizuAGjFCkgahhHL5OpoM2ZM=
-github.com/aiven/go-api-schemas v1.106.0 h1:qncRsbiaGnU9JE9fmTFHclTCBem+t+6EPMXGXM35w2c=
-github.com/aiven/go-api-schemas v1.106.0/go.mod h1:z7dGvufm6If4gOdVr7dWTuFZmll9FOZr5Z5CSxGpebA=
-github.com/aiven/go-client-codegen v0.74.0 h1:CqZUq8aGdhgcKJuM+YbC8RVUVgnCw9aA6yk0hMfvXWg=
-github.com/aiven/go-client-codegen v0.74.0/go.mod h1:QKN/GgLMGWd6+gPEucXlZPi5vC3C6RpD3UeBRQOLI1Y=
+github.com/aiven/go-api-schemas v1.107.0 h1:+FKR5Ko70yv4Xto6S4TqX0eHzme/uB81Mp5bUcXXf6U=
+github.com/aiven/go-api-schemas v1.107.0/go.mod h1:z7dGvufm6If4gOdVr7dWTuFZmll9FOZr5Z5CSxGpebA=
+github.com/aiven/go-client-codegen v0.76.0 h1:4Od3FmkF9ApdQU+FBgxQLxFge9cmpnMziXpKddkeWow=
+github.com/aiven/go-client-codegen v0.76.0/go.mod h1:QKN/GgLMGWd6+gPEucXlZPi5vC3C6RpD3UeBRQOLI1Y=
github.com/apparentlymart/go-textseg/v12 v12.0.0/go.mod h1:S/4uRK2UtaQttw1GenVJEynmyUenKwP++x/+DdGV/Ec=
github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY=
github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4=
diff --git a/internal/acctest/plancheck.go b/internal/acctest/plancheck.go
new file mode 100644
index 000000000..894a132e3
--- /dev/null
+++ b/internal/acctest/plancheck.go
@@ -0,0 +1,98 @@
+package acctest
+
+import (
+	"context"
+	"fmt"
+	"reflect"
+
+	tfjson "github.com/hashicorp/terraform-json"
+	"github.com/hashicorp/terraform-plugin-testing/plancheck"
+)
+
+type attributeChangeCheck struct {
+ resourceAddr string
+ attrs []string
+}
+
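+// ExpectOnlyAttributesChanged returns a plan check that fails if any attribute
+// other than the listed ones changes for the resource at resourceAddr, or if a
+// listed attribute does not change at all. A sketch of the intended usage in an
+// acceptance test step (resource and plancheck are from terraform-plugin-testing):
+//
+//	ConfigPlanChecks: resource.ConfigPlanChecks{
+//		PreApply: []plancheck.PlanCheck{
+//			acctest.ExpectOnlyAttributesChanged("aiven_kafka_quota.full", "request_percentage"),
+//		},
+//	},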
+func ExpectOnlyAttributesChanged(resourceAddr string, attrs ...string) plancheck.PlanCheck {
+ return &attributeChangeCheck{
+ resourceAddr: resourceAddr,
+ attrs: attrs,
+ }
+}
+
+func (c *attributeChangeCheck) CheckPlan(_ context.Context, req plancheck.CheckPlanRequest, resp *plancheck.CheckPlanResponse) {
+ var targetResource *tfjson.ResourceChange
+
+ // Find our resource in the changes
+ for _, rc := range req.Plan.ResourceChanges {
+ if rc.Address == c.resourceAddr {
+ targetResource = rc
+ break
+ }
+ }
+
+ if targetResource == nil {
+ resp.Error = fmt.Errorf("resource %s not found in plan", c.resourceAddr)
+ return
+ }
+
+ if targetResource.Change == nil {
+ resp.Error = fmt.Errorf("no changes found for resource %s", c.resourceAddr)
+ return
+ }
+
+ // Convert Before and After to maps
+ before, ok := targetResource.Change.Before.(map[string]interface{})
+ if !ok {
+ resp.Error = fmt.Errorf("before state for resource %s is not a map", c.resourceAddr)
+ return
+ }
+
+ after, ok := targetResource.Change.After.(map[string]interface{})
+ if !ok {
+		resp.Error = fmt.Errorf("after state for resource %s is not a map", c.resourceAddr)
+		return
+	}
+
+ // Create a set of expected changes
+ expectedChanges := make(map[string]struct{})
+ for _, attr := range c.attrs {
+ expectedChanges[attr] = struct{}{}
+ }
+
+ // Check all attributes in the after state
+ for key, afterValue := range after {
+ beforeValue, existsInBefore := before[key]
+
+		// If the value changed (reflect.DeepEqual handles nested maps and slices,
+		// which a plain != on interface values would panic on)
+		if !existsInBefore || !reflect.DeepEqual(beforeValue, afterValue) {
+ // Check if this change was expected
+ if _, expected := expectedChanges[key]; !expected {
+ resp.Error = fmt.Errorf(
+ "unexpected change in attribute %q for resource %s: before=%v, after=%v",
+ key,
+ c.resourceAddr,
+ beforeValue,
+ afterValue,
+ )
+ return
+ }
+ // Remove from expected changes as we found it
+ delete(expectedChanges, key)
+ }
+ }
+
+ // Check if all expected changes were found
+ if len(expectedChanges) > 0 {
+ remaining := make([]string, 0, len(expectedChanges))
+ for attr := range expectedChanges {
+ remaining = append(remaining, attr)
+ }
+ resp.Error = fmt.Errorf(
+ "expected changes in attributes %v for resource %s were not found in plan",
+ remaining,
+ c.resourceAddr,
+ )
+ }
+}
diff --git a/internal/acctest/template.go b/internal/acctest/template.go
new file mode 100644
index 000000000..1cca67355
--- /dev/null
+++ b/internal/acctest/template.go
@@ -0,0 +1,223 @@
+package acctest
+
+import (
+	"bytes"
+	"fmt"
+	"sort"
+	"strings"
+	"testing"
+	"text/template" // not html/template: rendered HCL must not be HTML-escaped
+)
+
+// resourceConfig is the interface that all resource configs must implement.
+type resourceConfig interface {
+ // ToMap converts the config to a map for template rendering
+ ToMap() map[string]any
+}
+
+// Template represents a single Terraform configuration template
+type Template struct {
+ Name string
+ Template string
+}
+
+// TemplateRegistry holds templates for a specific resource type
+type TemplateRegistry struct {
+ resourceName string
+ templates map[string]*template.Template
+ funcMap template.FuncMap
+}
+
+// NewTemplateRegistry creates a new template registry for a resource
+func NewTemplateRegistry(resourceName string) *TemplateRegistry {
+ return &TemplateRegistry{
+ resourceName: resourceName,
+ templates: make(map[string]*template.Template),
+ funcMap: make(template.FuncMap),
+ }
+}
+
+// AddTemplate adds a new template to the registry
+func (r *TemplateRegistry) AddTemplate(t testing.TB, name, templateStr string) error {
+ t.Helper()
+
+ tmpl := template.New(name)
+ if len(r.funcMap) > 0 {
+ tmpl = tmpl.Funcs(r.funcMap)
+ }
+
+ parsed, err := tmpl.Parse(templateStr)
+ if err != nil {
+ return fmt.Errorf("failed to parse template: %w", err)
+ }
+ r.templates[name] = parsed
+
+ return nil
+}
+
+// MustAddTemplate is like AddTemplate but fails the test on error
+func (r *TemplateRegistry) MustAddTemplate(t testing.TB, name, templateStr string) {
+ t.Helper()
+
+ if err := r.AddTemplate(t, name, templateStr); err != nil {
+ t.Fatal(err)
+ }
+}
+
+// Render renders a template with the given config
+func (r *TemplateRegistry) Render(t testing.TB, templateKey string, cfg map[string]any) (string, error) {
+ t.Helper()
+
+ tmpl, exists := r.templates[templateKey]
+ if !exists {
+ availableTemplates := r.getAvailableTemplates()
+
+ return "", fmt.Errorf("template %q does not exist for resource %s. Available templates: %v",
+ templateKey,
+ r.resourceName,
+ availableTemplates,
+ )
+ }
+
+ var buf bytes.Buffer
+ if err := tmpl.Execute(&buf, cfg); err != nil {
+ return "", fmt.Errorf("failed to render template: %w", err)
+ }
+
+ return buf.String(), nil
+}
+
+// MustRender is like Render but fails the test on error
+func (r *TemplateRegistry) MustRender(t testing.TB, templateKey string, cfg map[string]any) string {
+ t.Helper()
+
+ result, err := r.Render(t, templateKey, cfg)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ return result
+}
+
+// AddFunction adds a custom function to the template registry
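+// For example, registry.AddFunction("upper", strings.ToUpper) makes
+// {{ upper .name }} available in templates added after the call.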
+func (r *TemplateRegistry) AddFunction(name string, fn interface{}) {
+ if r.funcMap == nil {
+ r.funcMap = make(template.FuncMap)
+ }
+ r.funcMap[name] = fn
+}
+
+// HasTemplate checks if a template exists in the registry
+func (r *TemplateRegistry) HasTemplate(key string) bool {
+ _, exists := r.templates[key]
+ return exists
+}
+
+// RemoveTemplate removes a template from the registry
+func (r *TemplateRegistry) RemoveTemplate(key string) {
+ delete(r.templates, key)
+}
+
+// getAvailableTemplates returns a sorted list of available template keys
+func (r *TemplateRegistry) getAvailableTemplates() []string {
+ templates := make([]string, 0, len(r.templates))
+ for k := range r.templates {
+ templates = append(templates, k)
+ }
+ sort.Strings(templates)
+
+ return templates
+}
+
+// compositionEntry represents a combination of template and its config
+type compositionEntry struct {
+ TemplateKey string
+ Config map[string]any
+}
+
+// CompositionBuilder helps build complex compositions of templates
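+// A minimal usage sketch (the template names are illustrative):
+//
+//	config := registry.NewCompositionBuilder().
+//		Add("project_data", map[string]any{"project": "example-project"}).
+//		AddIf(withKafka, "kafka", map[string]any{"service_name": "example-kafka"}).
+//		MustRender(t)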
+type CompositionBuilder struct {
+ registry *TemplateRegistry
+ compositions []compositionEntry
+}
+
+// NewCompositionBuilder creates a new composition builder
+func (r *TemplateRegistry) NewCompositionBuilder() *CompositionBuilder {
+ return &CompositionBuilder{
+ registry: r,
+ compositions: make([]compositionEntry, 0),
+ }
+}
+
+// Add adds a new template and config to the composition
+func (b *CompositionBuilder) Add(templateKey string, cfg map[string]any) *CompositionBuilder {
+ b.compositions = append(b.compositions, compositionEntry{
+ TemplateKey: templateKey,
+ Config: cfg,
+ })
+ return b
+}
+
+// AddWithConfig adds a new template and config to the composition using a resourceConfig
+func (b *CompositionBuilder) AddWithConfig(templateKey string, cfg resourceConfig) *CompositionBuilder {
+ b.compositions = append(b.compositions, compositionEntry{
+ TemplateKey: templateKey,
+ Config: cfg.ToMap(),
+ })
+ return b
+}
+
+// AddIf conditional method to CompositionBuilder
+func (b *CompositionBuilder) AddIf(condition bool, templateKey string, cfg map[string]any) *CompositionBuilder {
+ if condition {
+ return b.Add(templateKey, cfg)
+ }
+
+ return b
+}
+
+func (b *CompositionBuilder) Remove(templateKey string) *CompositionBuilder {
+ var newCompositions []compositionEntry
+ for _, comp := range b.compositions {
+ if comp.TemplateKey != templateKey {
+ newCompositions = append(newCompositions, comp)
+ }
+ }
+ b.compositions = newCompositions
+
+ return b
+}
+
+// Render renders all templates in the composition and combines them
+func (b *CompositionBuilder) Render(t testing.TB) (string, error) {
+ t.Helper()
+
+ var renderedParts = make([]string, 0, len(b.compositions))
+
+ // Render each template
+ for _, comp := range b.compositions {
+ rendered, err := b.registry.Render(t, comp.TemplateKey, comp.Config)
+ if err != nil {
+ return "", fmt.Errorf("failed to render template %s: %w", comp.TemplateKey, err)
+ }
+ renderedParts = append(renderedParts, rendered)
+ }
+
+ // Combine all rendered parts
+ combined := strings.Join(renderedParts, "\n\n")
+
+ //TODO: add HCL validation?
+
+ return combined, nil
+}
+
+// MustRender is like Render but fails the test on error
+func (b *CompositionBuilder) MustRender(t testing.TB) string {
+ t.Helper()
+
+ result, err := b.Render(t)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return result
+}
diff --git a/internal/acctest/template_test.go b/internal/acctest/template_test.go
new file mode 100644
index 000000000..7ffdc769c
--- /dev/null
+++ b/internal/acctest/template_test.go
@@ -0,0 +1,477 @@
+package acctest
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+)
+
+func TestCompositionBuilder_SingleResource(t *testing.T) {
+ registry := NewTemplateRegistry("test")
+
+ template := `resource "aiven_project" "example_project" {
+ project = "{{ .project_name }}"
+}`
+ registry.MustAddTemplate(t, "project", template)
+
+ builder := registry.NewCompositionBuilder()
+ builder.Add("project", map[string]any{
+ "project_name": "test-project",
+ })
+
+ expected := `resource "aiven_project" "example_project" {
+ project = "test-project"
+}`
+
+ result := builder.MustRender(t)
+ assert.Equal(t, normalizeHCL(expected), normalizeHCL(result))
+}
+
+func TestCompositionBuilder_TwoIndependentResources(t *testing.T) {
+ registry := NewTemplateRegistry("test")
+
+ registry.MustAddTemplate(t, "org_unit", `resource "aiven_organizational_unit" "example_unit" {
+ name = "{{ .name }}"
+}`)
+ registry.MustAddTemplate(t, "billing_group", `resource "aiven_billing_group" "example_billing_group" {
+ name = "{{ .name }}"
+ billing_currency = "{{ .currency }}"
+}`)
+
+ builder := registry.NewCompositionBuilder()
+ builder.Add("org_unit", map[string]any{
+ "name": "Example Unit",
+ })
+ builder.Add("billing_group", map[string]any{
+ "name": "Example Billing",
+ "currency": "USD",
+ })
+
+ expected := `resource "aiven_organizational_unit" "example_unit" {
+ name = "Example Unit"
+}
+resource "aiven_billing_group" "example_billing_group" {
+ name = "Example Billing"
+ billing_currency = "USD"
+}`
+
+ result := builder.MustRender(t)
+ assert.Equal(t, normalizeHCL(expected), normalizeHCL(result))
+}
+
+func TestCompositionBuilder_DependentResources(t *testing.T) {
+ registry := NewTemplateRegistry("test")
+
+ registry.MustAddTemplate(t, "billing_group", `resource "aiven_billing_group" "example_billing_group" {
+ name = "{{ .name }}"
+ billing_currency = "USD"
+}`)
+ registry.MustAddTemplate(t, "project", `resource "aiven_project" "example_project" {
+ project = "{{ .name }}"
+ billing_group = aiven_billing_group.example_billing_group.id
+}`)
+
+ builder := registry.NewCompositionBuilder()
+ builder.Add("billing_group", map[string]any{
+ "name": "example-billing",
+ })
+ builder.Add("project", map[string]any{
+ "name": "example-project",
+ })
+
+ expected := `resource "aiven_billing_group" "example_billing_group" {
+ name = "example-billing"
+ billing_currency = "USD"
+}
+resource "aiven_project" "example_project" {
+ project = "example-project"
+ billing_group = aiven_billing_group.example_billing_group.id
+}`
+
+ result := builder.MustRender(t)
+ assert.Equal(t, normalizeHCL(expected), normalizeHCL(result))
+}
+
+func TestCompositionBuilder_ConditionalResource(t *testing.T) {
+ registry := NewTemplateRegistry("test")
+
+ registry.MustAddTemplate(t, "project", `resource "aiven_project" "example_project" {
+ project = "{{ .name }}"
+}`)
+ registry.MustAddTemplate(t, "redis", `resource "aiven_redis" "redis1" {
+ project = aiven_project.example_project.project
+ service_name = "{{ .name }}"
+ plan = "{{ .plan }}"
+}`)
+
+ tests := []struct {
+ name string
+ includeRedis bool
+ expectedOutput string
+ }{
+ {
+ name: "with_redis",
+ includeRedis: true,
+ expectedOutput: `resource "aiven_project" "example_project" {
+ project = "test-project"
+}
+resource "aiven_redis" "redis1" {
+ project = aiven_project.example_project.project
+ service_name = "test-redis"
+ plan = "business-4"
+}`,
+ },
+ {
+ name: "without_redis",
+ includeRedis: false,
+ expectedOutput: `resource "aiven_project" "example_project" {
+ project = "test-project"
+}`,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ builder := registry.NewCompositionBuilder()
+
+ builder.Add("project", map[string]any{
+ "name": "test-project",
+ })
+
+ builder.AddIf(tt.includeRedis, "redis", map[string]any{
+ "name": "test-redis",
+ "plan": "business-4",
+ })
+
+ result := builder.MustRender(t)
+ assert.Equal(t, normalizeHCL(tt.expectedOutput), normalizeHCL(result))
+ })
+ }
+}
+
+func TestCompositionBuilder_DataSourceAndResource(t *testing.T) {
+ registry := NewTemplateRegistry("test")
+
+ registry.MustAddTemplate(t, "org_data", `data "aiven_organization" "main" {
+ name = "{{ .name }}"
+}`)
+ registry.MustAddTemplate(t, "project", `resource "aiven_project" "example_project" {
+ project = "{{ .name }}"
+ organization_id = data.aiven_organization.main.id
+}`)
+
+ builder := registry.NewCompositionBuilder()
+ builder.Add("org_data", map[string]any{
+ "name": "example-org",
+ })
+ builder.Add("project", map[string]any{
+ "name": "example-project",
+ })
+
+ expected := `data "aiven_organization" "main" {
+ name = "example-org"
+}
+resource "aiven_project" "example_project" {
+ project = "example-project"
+ organization_id = data.aiven_organization.main.id
+}`
+
+ result := builder.MustRender(t)
+ assert.Equal(t, normalizeHCL(expected), normalizeHCL(result))
+}
+
+func TestTemplateNilAndMissingValues(t *testing.T) {
+ registry := NewTemplateRegistry("test")
+
+ templateStr := `resource "aiven_service" "example" {
+ project = "{{ .project }}"
+ service_name = "{{ .service_name }}"
+ {{- if .maintenance_window_dow }}
+ maintenance_window_dow = "{{ .maintenance_window_dow }}"
+ {{- end }}
+ {{- if .maintenance_window_time }}
+ maintenance_window_time = "{{ .maintenance_window_time }}"
+ {{- end }}
+}`
+
+ registry.MustAddTemplate(t, "service", templateStr)
+
+ tests := []struct {
+ name string
+ config map[string]any
+ expected string
+ }{
+ {
+ name: "explicit_nil_value",
+ config: map[string]any{
+ "project": "test-project",
+ "service_name": "test-service",
+ "maintenance_window_dow": nil,
+ "maintenance_window_time": "10:00:00",
+ },
+ expected: `resource "aiven_service" "example" {
+ project = "test-project"
+ service_name = "test-service"
+ maintenance_window_time = "10:00:00"
+}`,
+ },
+ {
+ name: "missing_key",
+ config: map[string]any{
+ "project": "test-project",
+ "service_name": "test-service",
+ // maintenance_window_dow is not in config at all
+ "maintenance_window_time": "10:00:00",
+ },
+ expected: `resource "aiven_service" "example" {
+ project = "test-project"
+ service_name = "test-service"
+ maintenance_window_time = "10:00:00"
+}`,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ result := registry.MustRender(t, "service", tt.config)
+ assert.Equal(t, normalizeHCL(tt.expected), normalizeHCL(result))
+ })
+ }
+}
+
+func TestCompositionBuilder(t *testing.T) {
+ tests := []struct {
+ name string
+ templates map[string]string
+ compositions []struct {
+ templateKey string
+ config map[string]any
+ }
+ expectedOutput string
+ expectError bool
+ }{
+ {
+ name: "kafka_with_user",
+ templates: map[string]string{
+ "kafka": `resource "aiven_kafka" "example_kafka" {
+ project = data.aiven_project.example_project.project
+ cloud_name = "{{ .cloud_name }}"
+ plan = "{{ .plan }}"
+ service_name = "{{ .service_name }}"
+ maintenance_window_dow = "{{ .maintenance_window_dow }}"
+ maintenance_window_time = "{{ .maintenance_window_time }}"
+ kafka_user_config {
+ kafka_rest = "{{ .kafka_rest }}"
+ kafka_connect = "{{ .kafka_connect }}"
+ schema_registry = "{{ .schema_registry }}"
+ kafka_version = "{{ .kafka_version }}"
+ kafka {
+ group_max_session_timeout_ms = "{{ .group_max_session_timeout_ms }}"
+ log_retention_bytes = "{{ .log_retention_bytes }}"
+ }
+ public_access {
+ kafka_rest = "{{ .kafka_rest_public }}"
+ kafka_connect = "{{ .kafka_connect_public }}"
+ }
+ }
+}`,
+ "kafka_user": `resource "aiven_kafka_user" "example_service_user" {
+ service_name = aiven_kafka.example_kafka.service_name
+ project = data.aiven_project.example_project.project
+ username = "{{ .username }}"
+ password = "{{ .password }}"
+}`,
+ "project_data": `data "aiven_project" "example_project" {
+ project = "{{ .project }}"
+}`,
+ },
+ compositions: []struct {
+ templateKey string
+ config map[string]any
+ }{
+ {
+ templateKey: "project_data",
+ config: map[string]any{
+ "project": "example-project",
+ },
+ },
+ {
+ templateKey: "kafka",
+ config: map[string]any{
+ "cloud_name": "google-europe-west1",
+ "plan": "business-4",
+ "service_name": "example-kafka",
+ "maintenance_window_dow": "monday",
+ "maintenance_window_time": "10:00:00",
+ "kafka_rest": true,
+ "kafka_connect": true,
+ "schema_registry": true,
+ "kafka_version": "3.5",
+ "group_max_session_timeout_ms": 70000,
+ "log_retention_bytes": 1000000000,
+ "kafka_rest_public": true,
+ "kafka_connect_public": true,
+ },
+ },
+ {
+ templateKey: "kafka_user",
+ config: map[string]any{
+ "username": "example-kafka-user",
+ "password": "dummy-password",
+ },
+ },
+ },
+ expectedOutput: `data "aiven_project" "example_project" {
+ project = "example-project"
+}
+resource "aiven_kafka" "example_kafka" {
+ project = data.aiven_project.example_project.project
+ cloud_name = "google-europe-west1"
+ plan = "business-4"
+ service_name = "example-kafka"
+ maintenance_window_dow = "monday"
+ maintenance_window_time = "10:00:00"
+ kafka_user_config {
+ kafka_rest = "true"
+ kafka_connect = "true"
+ schema_registry = "true"
+ kafka_version = "3.5"
+ kafka {
+ group_max_session_timeout_ms = "70000"
+ log_retention_bytes = "1000000000"
+ }
+ public_access {
+ kafka_rest = "true"
+ kafka_connect = "true"
+ }
+ }
+}
+resource "aiven_kafka_user" "example_service_user" {
+ service_name = aiven_kafka.example_kafka.service_name
+ project = data.aiven_project.example_project.project
+ username = "example-kafka-user"
+ password = "dummy-password"
+}`,
+ },
+ {
+ name: "conditional_kafka_config",
+ templates: map[string]string{
+ "kafka_base": `resource "aiven_kafka" "kafka" {
+ project = "{{ .project }}"
+ service_name = "{{ .service_name }}"
+ cloud_name = "{{ .cloud_name }}"
+ plan = "{{ .plan }}"
+}`,
+ "kafka_config": ` kafka_user_config {
+ kafka_rest = {{ .kafka_rest }}
+ schema_registry = {{ .schema_registry }}
+ }`,
+ },
+ compositions: []struct {
+ templateKey string
+ config map[string]any
+ }{
+ {
+ templateKey: "kafka_base",
+ config: map[string]any{
+ "project": "test-project",
+ "service_name": "test-kafka",
+ "cloud_name": "google-europe-west1",
+ "plan": "business-4",
+ },
+ },
+ },
+ expectedOutput: `resource "aiven_kafka" "kafka" {
+ project = "test-project"
+ service_name = "test-kafka"
+ cloud_name = "google-europe-west1"
+ plan = "business-4"
+}`,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ registry := NewTemplateRegistry("test")
+
+ // Add all templates
+ for key, tmpl := range tt.templates {
+ err := registry.AddTemplate(t, key, tmpl)
+ assert.NoError(t, err)
+ }
+
+ // Create composition
+ builder := registry.NewCompositionBuilder()
+ for _, comp := range tt.compositions {
+ builder.Add(comp.templateKey, comp.config)
+ }
+
+ // Render and verify
+ result, err := builder.Render(t)
+ if tt.expectError {
+ assert.Error(t, err)
+ return
+ }
+
+ assert.NoError(t, err)
+ assert.Equal(t,
+ normalizeHCL(tt.expectedOutput),
+ normalizeHCL(result),
+ "Rendered template should match expected output",
+ )
+ })
+ }
+}
+
+// normalizeHCL normalizes an HCL string for comparison by trimming whitespace,
+// dropping empty lines, and standardizing "key = value" spacing and indentation
+func normalizeHCL(s string) string {
+ // Split into lines for processing
+ lines := strings.Split(s, "\n")
+ var normalized = make([]string, 0, len(lines))
+
+ for _, line := range lines {
+ // Trim spaces from both ends
+ line = strings.TrimSpace(line)
+
+ // Skip empty lines
+ if line == "" {
+ continue
+ }
+
+ // Handle lines with just closing braces
+ if line == "}" {
+ normalized = append(normalized, line)
+ continue
+ }
+
+ // For lines with content, normalize internal spacing
+ if strings.Contains(line, "=") {
+ // Split by = and trim spaces
+ parts := strings.Split(line, "=")
+ if len(parts) == 2 {
+ key := strings.TrimSpace(parts[0])
+ value := strings.TrimSpace(parts[1])
+ // Reconstruct with consistent spacing
+ line = fmt.Sprintf(" %s = %s", key, value)
+ }
+ } else if strings.HasPrefix(line, "resource") || strings.HasPrefix(line, "data") {
+ // Handle resource and data block declarations
+ line = strings.TrimSpace(line)
+ } else if !strings.HasPrefix(line, "}") {
+ // Add consistent indentation for other non-empty lines
+ line = " " + strings.TrimSpace(line)
+ }
+
+ normalized = append(normalized, line)
+ }
+
+ // Join lines with newlines
+ result := strings.Join(normalized, "\n")
+
+ // Normalize line endings
+ result = strings.ReplaceAll(result, "\r\n", "\n")
+
+ return result
+}
diff --git a/internal/sdkprovider/provider/provider.go b/internal/sdkprovider/provider/provider.go
index 5f35201be..4e0634002 100644
--- a/internal/sdkprovider/provider/provider.go
+++ b/internal/sdkprovider/provider/provider.go
@@ -268,6 +268,7 @@ func Provider(version string) (*schema.Provider, error) {
"aiven_mirrormaker_replication_flow": kafka.ResourceMirrorMakerReplicationFlow(),
"aiven_kafka_connect": kafka.ResourceKafkaConnect(),
"aiven_kafka_mirrormaker": kafka.ResourceKafkaMirrormaker(),
+ "aiven_kafka_quota": kafka.ResourceKafkaQuota(),
// clickhouse
"aiven_clickhouse": clickhouse.ResourceClickhouse(),
diff --git a/internal/sdkprovider/service/kafka/kafka_quota.go b/internal/sdkprovider/service/kafka/kafka_quota.go
new file mode 100644
index 000000000..111660eea
--- /dev/null
+++ b/internal/sdkprovider/service/kafka/kafka_quota.go
@@ -0,0 +1,211 @@
+package kafka
+
+import (
+ "context"
+ "fmt"
+
+ avngen "github.com/aiven/go-client-codegen"
+ "github.com/aiven/go-client-codegen/handler/kafka"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
+
+ "github.com/aiven/terraform-provider-aiven/internal/common"
+ "github.com/aiven/terraform-provider-aiven/internal/schemautil"
+)
+
+var quotaFieldsAliases = map[string]string{
+	"client_id": "client-id",
+}
+
+var aivenKafkaQuotaSchema = map[string]*schema.Schema{
+ "project": schemautil.CommonSchemaProjectReference,
+ "service_name": schemautil.CommonSchemaServiceNameReference,
+ "user": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Description: `
+Represents a logical group of clients, assigned a unique name by the client application.
+Quotas can be applied based on user, client-id, or both.
+The most relevant quota is chosen for each connection.
+All connections within a quota group share the same quota.
+It is possible to set default quotas for each (user, client-id) pair, user, or client-id group by specifying 'default'.`,
+ ValidateFunc: schemautil.GetServiceUserValidateFunc(),
+ AtLeastOneOf: []string{"user", "client_id"},
+ },
+ "client_id": {
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Description: `
+Represents a logical group of clients, assigned a unique name by the client application.
+Quotas can be applied based on user, client-id, or both.
+The most relevant quota is chosen for each connection.
+All connections within a quota group share the same quota.
+It is possible to set default quotas for each (user, client-id) pair, user, or client-id group by specifying 'default'.`,
+ ValidateFunc: validation.StringLenBetween(1, 255),
+ AtLeastOneOf: []string{"user", "client_id"},
+ },
+ "consumer_byte_rate": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Description: `
+Defines the bandwidth limit in bytes/sec for each group of clients sharing a quota.
+Every distinct client group is allocated a specific quota, as defined by the cluster, on a per-broker basis.
+Exceeding this limit results in client throttling.`,
+ ValidateFunc: validation.IntBetween(0, 1073741824),
+ AtLeastOneOf: []string{"consumer_byte_rate", "producer_byte_rate", "request_percentage"},
+ },
+ "producer_byte_rate": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Description: `
+Defines the bandwidth limit in bytes/sec for each group of clients sharing a quota.
+Every distinct client group is allocated a specific quota, as defined by the cluster, on a per-broker basis.
+Exceeding this limit results in client throttling.`,
+ ValidateFunc: validation.IntBetween(0, 1073741824),
+ AtLeastOneOf: []string{"consumer_byte_rate", "producer_byte_rate", "request_percentage"},
+ },
+ "request_percentage": {
+ Type: schema.TypeInt,
+ Optional: true,
+ Description: `
+Sets the maximum percentage of CPU time that a client group can use on request handler I/O and network threads per broker within a quota window.
+Exceeding this limit triggers throttling.
+The quota, expressed as a percentage, also indicates the total allowable CPU usage for the client groups sharing the quota.`,
+ ValidateFunc: validation.IntBetween(0, 100),
+ AtLeastOneOf: []string{"consumer_byte_rate", "producer_byte_rate", "request_percentage"},
+ },
+}
+
+func ResourceKafkaQuota() *schema.Resource {
+ return &schema.Resource{
+ Description: "Creates and manages quotas for an Aiven for Apache Kafka® service user.",
+ ReadContext: common.WithGenClient(resourceKafkaQuotaRead),
+ CreateContext: common.WithGenClient(resourceKafkaQuotaCreate),
+ UpdateContext: common.WithGenClient(resourceKafkaQuotaUpdate),
+ DeleteContext: common.WithGenClient(resourceKafkaQuotaDelete),
+ Importer: &schema.ResourceImporter{
+ StateContext: schema.ImportStatePassthroughContext,
+ },
+ Timeouts: schemautil.DefaultResourceTimeouts(),
+
+ Schema: aivenKafkaQuotaSchema,
+ }
+}
+
+func resourceKafkaQuotaCreate(ctx context.Context, d *schema.ResourceData, client avngen.Client) error {
+ var (
+ project = d.Get("project").(string)
+ service = d.Get("service_name").(string)
+ user = d.Get("user").(string)
+ clientID = d.Get("client_id").(string)
+
+ req kafka.ServiceKafkaQuotaCreateIn
+ )
+
+ if err := schemautil.ResourceDataGet(
+ d,
+ &req,
+ schemautil.RenameAliases(quotaFieldsAliases),
+ ); err != nil {
+ return err
+ }
+
+ if err := client.ServiceKafkaQuotaCreate(ctx, project, service, &req); err != nil {
+ return err
+ }
+
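+	// The composite ID mirrors the import format: PROJECT/SERVICE_NAME/CLIENT_ID/USER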
+ d.SetId(schemautil.BuildResourceID(project, service, clientID, user))
+
+ return resourceKafkaQuotaRead(ctx, d, client)
+}
+
+func resourceKafkaQuotaUpdate(ctx context.Context, d *schema.ResourceData, client avngen.Client) error {
+	var req kafka.ServiceKafkaQuotaCreateIn
+
+ project, service, _, _, err := schemautil.SplitResourceID4(d.Id())
+ if err != nil {
+ return err
+ }
+
+ if err := schemautil.ResourceDataGet(
+ d,
+ &req,
+ schemautil.RenameAliases(quotaFieldsAliases),
+ ); err != nil {
+ return err
+ }
+
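+	// The create endpoint overwrites any existing quota for the same
+	// (user, client-id) pair, so it doubles as the update operation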
+ if err := client.ServiceKafkaQuotaCreate(ctx, project, service, &req); err != nil {
+ return err
+ }
+
+ return resourceKafkaQuotaRead(ctx, d, client)
+}
+
+func resourceKafkaQuotaRead(ctx context.Context, d *schema.ResourceData, client avngen.Client) error {
+ project, serviceName, clientID, user, err := schemautil.SplitResourceID4(d.Id())
+ if err != nil {
+ return err
+ }
+
+ var params [][2]string
+ if user != "" {
+ params = append(params, kafka.ServiceKafkaQuotaDescribeUser(user))
+ }
+
+ if clientID != "" {
+ params = append(params, kafka.ServiceKafkaQuotaDescribeClientId(clientID))
+ }
+
+ if len(params) == 0 {
+ return fmt.Errorf("invalid resource ID: %q, either user or client_id must be set", d.Id())
+ }
+
+ resp, err := client.ServiceKafkaQuotaDescribe(
+ ctx,
+ project,
+ serviceName,
+ params...,
+ )
+ if err != nil {
+ return err
+ }
+
+ return schemautil.ResourceDataSet(
+ aivenKafkaQuotaSchema,
+ d,
+ resp,
+ schemautil.RenameAliasesReverse(quotaFieldsAliases),
+ )
+}
+
+func resourceKafkaQuotaDelete(ctx context.Context, d *schema.ResourceData, client avngen.Client) error {
+ var (
+ project = d.Get("project").(string)
+ serviceName = d.Get("service_name").(string)
+ clientID = d.Get("client_id").(string)
+ user = d.Get("user").(string)
+ )
+
+ var params [][2]string
+ if user != "" {
+ params = append(params, kafka.ServiceKafkaQuotaDeleteUser(user))
+ }
+
+ if clientID != "" {
+ params = append(params, kafka.ServiceKafkaQuotaDeleteClientId(clientID))
+ }
+
+ return client.ServiceKafkaQuotaDelete(
+ ctx,
+ project,
+ serviceName,
+ params...,
+ )
+}
diff --git a/internal/sdkprovider/service/kafka/kafka_quota_test.go b/internal/sdkprovider/service/kafka/kafka_quota_test.go
new file mode 100644
index 000000000..ca7918a7e
--- /dev/null
+++ b/internal/sdkprovider/service/kafka/kafka_quota_test.go
@@ -0,0 +1,371 @@
+package kafka_test
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "regexp"
+ "testing"
+
+ "github.com/aiven/go-client-codegen/handler/kafka"
+ "github.com/hashicorp/terraform-plugin-testing/helper/acctest"
+ "github.com/hashicorp/terraform-plugin-testing/helper/resource"
+ "github.com/hashicorp/terraform-plugin-testing/plancheck"
+ "github.com/hashicorp/terraform-plugin-testing/terraform"
+
+ acc "github.com/aiven/terraform-provider-aiven/internal/acctest"
+ "github.com/aiven/terraform-provider-aiven/internal/common"
+ "github.com/aiven/terraform-provider-aiven/internal/schemautil"
+)
+
+const kafkaQuotaResource = "aiven_kafka_quota"
+
+func TestAccAivenKafkaQuota(t *testing.T) {
+ var (
+ registry = acc.NewTemplateRegistry(kafkaQuotaResource)
+ randName = acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum)
+ serviceName = fmt.Sprintf("test-acc-sr-%s", randName)
+ projectName = os.Getenv("AIVEN_PROJECT_NAME")
+
+ user = fmt.Sprintf("acc_test_user_%s", randName)
+ clientID = fmt.Sprintf("acc_test_client_%s", randName)
+ )
+
+	// register the config templates shared by the test steps
+ registry.MustAddTemplate(t, "project_data", `
+data "aiven_project" "foo" {
+ project = "{{ .project }}"
+}`)
+
+ registry.MustAddTemplate(t, "aiven_kafka", `
+resource "aiven_kafka" "bar" {
+ project = data.aiven_project.foo.project
+ cloud_name = "google-europe-west1"
+ plan = "startup-2"
+ service_name = "{{ .service_name }}"
+ maintenance_window_dow = "monday"
+ maintenance_window_time = "10:00:00"
+}`)
+
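+	// optional attributes are rendered only when present in the parameters, so
+	// a single template covers user-only, client-only, and combined quotas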
+ registry.MustAddTemplate(t, "kafka_quota", `
+resource "aiven_kafka_quota" "{{ .resource_name }}" {
+ project = data.aiven_project.foo.project
+ service_name = aiven_kafka.bar.service_name
+ {{- if .user }}
+ user = "{{ .user }}"
+ {{- end }}
+ {{- if .client_id }}
+ client_id = "{{ .client_id }}"
+ {{- end }}
+ {{- if .consumer_byte_rate }}
+ consumer_byte_rate = {{ .consumer_byte_rate }}
+ {{- end }}
+ {{- if .producer_byte_rate }}
+ producer_byte_rate = {{ .producer_byte_rate }}
+ {{- end }}
+ {{- if .request_percentage }}
+ request_percentage = {{ .request_percentage }}
+ {{- end }}
+}`)
+
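+	// each step below reuses the same project data source and Kafka service;
+	// only the quota blocks differ between compositions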
+	newComposition := func() *acc.CompositionBuilder {
+		return registry.NewCompositionBuilder().
+			Add("project_data", map[string]any{
+				"project": projectName,
+			}).
+			Add("aiven_kafka", map[string]any{
+				"service_name": serviceName,
+			})
+	}
+
+ resource.ParallelTest(t, resource.TestCase{
+ PreCheck: func() { acc.TestAccPreCheck(t) },
+ ProtoV6ProviderFactories: acc.TestProtoV6ProviderFactories,
+ CheckDestroy: testAccCheckAivenKafkaQuotaDestroy,
+ Steps: []resource.TestStep{
+ {
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "full",
+ "service_name": serviceName,
+ "user": user,
+ "client_id": clientID,
+ "consumer_byte_rate": 1000,
+ "producer_byte_rate": 1000,
+					"request_percentage": 101, // deliberately out of range to trigger validation
+ }).
+ MustRender(t),
+ ExpectError: regexp.MustCompile(`expected .+ to be in the range \(\d+ - \d+\), got \d+`),
+ },
+ {
+			// error when neither user nor client_id is set
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "full",
+ "service_name": serviceName,
+ "consumer_byte_rate": 1000,
+ "producer_byte_rate": 1000,
+ "request_percentage": 10,
+ }).
+ MustRender(t),
+ ExpectError: regexp.MustCompile(`"(?:user|client_id)": one of ` + "`" + `client_id,user` + "`" + ` must be specified`),
+ },
+ {
+ // valid configuration
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "full",
+ "service_name": serviceName,
+ "user": user,
+ "client_id": clientID,
+ "consumer_byte_rate": 1000,
+ "producer_byte_rate": 1000,
+ "request_percentage": 10,
+ }).
+ MustRender(t),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "project", projectName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "service_name", serviceName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "user", user),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "client_id", clientID),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "consumer_byte_rate", "1000"),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "producer_byte_rate", "1000"),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "request_percentage", "10"),
+ ),
+ },
+ {
+			// an unchanged configuration must produce an empty plan
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "full",
+ "service_name": serviceName,
+ "user": user,
+ "client_id": clientID,
+ "consumer_byte_rate": 1000,
+ "producer_byte_rate": 1000,
+ "request_percentage": 10,
+ }).
+ MustRender(t),
+ PlanOnly: true,
+ ExpectNonEmptyPlan: false,
+ },
+ {
+			// changed attribute values must produce a non-empty plan
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "full",
+ "service_name": serviceName,
+ "user": user,
+ "client_id": clientID,
+ "consumer_byte_rate": 2000,
+ "producer_byte_rate": 2000,
+ "request_percentage": 100,
+ }).
+ MustRender(t),
+ PlanOnly: true,
+ ExpectNonEmptyPlan: true,
+ },
+ {
+			// apply the change and verify only the modified attributes are updated in place
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "full",
+ "service_name": serviceName,
+ "user": user,
+ "client_id": clientID,
+ "consumer_byte_rate": 3000,
+ "producer_byte_rate": 3000,
+ "request_percentage": 10,
+ }).
+ MustRender(t),
+ ConfigPlanChecks: resource.ConfigPlanChecks{
+ PreApply: []plancheck.PlanCheck{
+ plancheck.ExpectResourceAction(fmt.Sprintf("%s.full", kafkaQuotaResource), plancheck.ResourceActionUpdate),
+ acc.ExpectOnlyAttributesChanged(fmt.Sprintf("%s.full", kafkaQuotaResource), "consumer_byte_rate", "producer_byte_rate"),
+ },
+ },
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "project", projectName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "service_name", serviceName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "user", user),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "client_id", clientID),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "consumer_byte_rate", "3000"),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "producer_byte_rate", "3000"),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "request_percentage", "10"),
+ ),
+ },
+ {
+			// verify the resource is replaced when the user changes
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "full",
+ "service_name": serviceName,
+ "user": fmt.Sprintf("%s_updated", user),
+ "client_id": clientID,
+ "consumer_byte_rate": 3000,
+ "producer_byte_rate": 3000,
+ "request_percentage": 10,
+ }).
+ MustRender(t),
+ ConfigPlanChecks: resource.ConfigPlanChecks{
+ PreApply: []plancheck.PlanCheck{
+ plancheck.ExpectNonEmptyPlan(),
+ plancheck.ExpectResourceAction(fmt.Sprintf("%s.full", kafkaQuotaResource), plancheck.ResourceActionReplace),
+ },
+ },
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "project", projectName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "service_name", serviceName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "user", fmt.Sprintf("%s_updated", user)),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "client_id", clientID),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "consumer_byte_rate", "3000"),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "producer_byte_rate", "3000"),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.full", kafkaQuotaResource), "request_percentage", "10"),
+ ),
+ },
+ {
+			// create a new resource with only consumer_byte_rate set
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "byte_rate",
+ "service_name": serviceName,
+ "user": user,
+ "client_id": clientID,
+ "consumer_byte_rate": 100,
+ }).
+ MustRender(t),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "project", projectName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "service_name", serviceName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "user", user),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "client_id", clientID),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "consumer_byte_rate", "100"),
+
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "producer_byte_rate"),
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "request_percentage"),
+ ),
+ },
+ {
+			// verify the in-place update modifies only consumer_byte_rate
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "byte_rate",
+ "service_name": serviceName,
+ "user": user,
+ "client_id": clientID,
+ "consumer_byte_rate": 3000,
+ }).
+ MustRender(t),
+ ConfigPlanChecks: resource.ConfigPlanChecks{
+ PreApply: []plancheck.PlanCheck{
+ plancheck.ExpectResourceAction(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), plancheck.ResourceActionUpdate),
+ acc.ExpectOnlyAttributesChanged(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "consumer_byte_rate"),
+ },
+ },
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "project", projectName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "service_name", serviceName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "user", user),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "client_id", clientID),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "consumer_byte_rate", "3000"),
+
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "producer_byte_rate"),
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.byte_rate", kafkaQuotaResource), "request_percentage"),
+ ),
+ },
+ {
+			// create multiple resources with different configurations
+ Config: newComposition().
+ Add("kafka_quota", map[string]any{
+ "resource_name": "new_full",
+ "service_name": serviceName,
+ "user": user,
+ "client_id": clientID,
+ "consumer_byte_rate": 4000,
+ "producer_byte_rate": 4000,
+ "request_percentage": 40,
+ }).
+ Add("kafka_quota", map[string]any{
+ "resource_name": "user",
+ "service_name": serviceName,
+ "user": user,
+ "request_percentage": 20,
+ }).
+ Add("kafka_quota", map[string]any{
+ "resource_name": "client",
+ "service_name": serviceName,
+ "client_id": clientID,
+ "producer_byte_rate": 2000,
+ }).
+ MustRender(t),
+ Check: resource.ComposeTestCheckFunc(
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.new_full", kafkaQuotaResource), "project", projectName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.new_full", kafkaQuotaResource), "service_name", serviceName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.new_full", kafkaQuotaResource), "user", user),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.new_full", kafkaQuotaResource), "client_id", clientID),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.new_full", kafkaQuotaResource), "consumer_byte_rate", "4000"),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.new_full", kafkaQuotaResource), "producer_byte_rate", "4000"),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.new_full", kafkaQuotaResource), "request_percentage", "40"),
+
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.user", kafkaQuotaResource), "project", projectName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.user", kafkaQuotaResource), "service_name", serviceName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.user", kafkaQuotaResource), "user", user),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.user", kafkaQuotaResource), "request_percentage", "20"),
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.user", kafkaQuotaResource), "client_id"),
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.user", kafkaQuotaResource), "consumer_byte_rate"),
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.user", kafkaQuotaResource), "producer_byte_rate"),
+
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.client", kafkaQuotaResource), "project", projectName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.client", kafkaQuotaResource), "service_name", serviceName),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.client", kafkaQuotaResource), "client_id", clientID),
+ resource.TestCheckResourceAttr(fmt.Sprintf("%s.client", kafkaQuotaResource), "producer_byte_rate", "2000"),
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.client", kafkaQuotaResource), "user"),
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.client", kafkaQuotaResource), "consumer_byte_rate"),
+ resource.TestCheckNoResourceAttr(fmt.Sprintf("%s.client", kafkaQuotaResource), "request_percentage"),
+ ),
+ },
+ },
+ })
+}
+
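+// testAccCheckAivenKafkaQuotaDestroy verifies that every aiven_kafka_quota in
+// state is gone: describing it must fail with a non-critical (not found)
+// error.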
+func testAccCheckAivenKafkaQuotaDestroy(s *terraform.State) error {
+ var (
+ c, err = acc.GetTestGenAivenClient()
+ ctx = context.Background()
+ )
+
+ if err != nil {
+ return fmt.Errorf("failed to instantiate GenAiven client: %w", err)
+ }
+
+ for _, rs := range s.RootModule().Resources {
+ if rs.Type != "aiven_kafka_quota" {
+ continue
+ }
+
+ p, sn, cID, u, err := schemautil.SplitResourceID4(rs.Primary.ID)
+ if err != nil {
+ return fmt.Errorf("error splitting resource ID: %w", err)
+ }
+
+ var params [][2]string
+ if u != "" {
+ params = append(params, kafka.ServiceKafkaQuotaDeleteUser(u))
+ }
+ if cID != "" {
+ params = append(params, kafka.ServiceKafkaQuotaDeleteClientId(cID))
+ }
+
+		_, err = c.ServiceKafkaQuotaDescribe(ctx, p, sn, params...)
+		if err != nil {
+			if common.IsCritical(err) {
+				return err
+			}
+
+			// a non-critical error (e.g. not found) means the quota was deleted
+			continue
+		}
+
+		return fmt.Errorf("kafka quota (%s) still exists", rs.Primary.ID)
+ }
+
+ return nil
+}
diff --git a/internal/sdkprovider/userconfig/service/opensearch.go b/internal/sdkprovider/userconfig/service/opensearch.go
index f92ab3e80..4e0b81a33 100644
--- a/internal/sdkprovider/userconfig/service/opensearch.go
+++ b/internal/sdkprovider/userconfig/service/opensearch.go
@@ -465,6 +465,11 @@ func opensearchUserConfig() *schema.Schema {
Optional: true,
Type: schema.TypeInt,
},
+ "cluster_routing_allocation_balance_prefer_primary": {
+ Description: "When set to true, OpenSearch attempts to evenly distribute the primary shards between the cluster nodes. Enabling this setting does not always guarantee an equal number of primary shards on each node, especially in the event of a failover. Changing this setting to false after it was set to true does not invoke redistribution of primary shards. Default is false. Default: `false`.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
"cluster_routing_allocation_node_concurrent_recoveries": {
Description: "How many concurrent incoming/outgoing shard recoveries (normally replicas) are allowed to happen on a node. Defaults to node cpu count * 2.",
Optional: true,
@@ -835,6 +840,34 @@ func opensearchUserConfig() *schema.Schema {
Optional: true,
Type: schema.TypeInt,
},
+ "segrep": {
+ Description: "Segment Replication Backpressure Settings",
+ Elem: &schema.Resource{Schema: map[string]*schema.Schema{
+ "pressure_checkpoint_limit": {
+ Description: "The maximum number of indexing checkpoints that a replica shard can fall behind when copying from primary. Once `segrep.pressure.checkpoint.limit` is breached along with `segrep.pressure.time.limit`, the segment replication backpressure mechanism is initiated. Default is 4 checkpoints. Default: `4`.",
+ Optional: true,
+ Type: schema.TypeInt,
+ },
+ "pressure_enabled": {
+ Description: "Enables the segment replication backpressure mechanism. Default is false. Default: `false`.",
+ Optional: true,
+ Type: schema.TypeBool,
+ },
+ "pressure_replica_stale_limit": {
+ Description: "The maximum number of stale replica shards that can exist in a replication group. Once `segrep.pressure.replica.stale.limit` is breached, the segment replication backpressure mechanism is initiated. Default is .5, which is 50% of a replication group. Default: `0.5`.",
+ Optional: true,
+ Type: schema.TypeFloat,
+ },
+ "pressure_time_limit": {
+						Description: "The maximum amount of time that a replica shard can take to copy from the primary shard. Once `segrep.pressure.time.limit` is breached along with `segrep.pressure.checkpoint.limit`, the segment replication backpressure mechanism is initiated. Default is 5 minutes. Default: `5m`.",
+ Optional: true,
+ Type: schema.TypeString,
+ },
+ }},
+ MaxItems: 1,
+ Optional: true,
+ Type: schema.TypeList,
+ },
"shard_indexing_pressure": {
Description: "Shard indexing back pressure settings",
Elem: &schema.Resource{Schema: map[string]*schema.Schema{
diff --git a/internal/sdkprovider/userconfig/service/service.go b/internal/sdkprovider/userconfig/service/service.go
index 201e538a6..8b133659e 100644
--- a/internal/sdkprovider/userconfig/service/service.go
+++ b/internal/sdkprovider/userconfig/service/service.go
@@ -108,9 +108,14 @@ func GetFieldMapping(kind string) map[string]string {
"ip_filter_string": "ip_filter",
},
"opensearch": {
- "ip_filter_object": "ip_filter",
- "ip_filter_string": "ip_filter",
- "opensearch/search_insights_top_queries": "opensearch/search.insights.top_queries",
+ "ip_filter_object": "ip_filter",
+ "ip_filter_string": "ip_filter",
+ "opensearch/cluster_routing_allocation_balance_prefer_primary": "opensearch/cluster.routing.allocation.balance.prefer_primary",
+ "opensearch/search_insights_top_queries": "opensearch/search.insights.top_queries",
+ "opensearch/segrep/pressure_checkpoint_limit": "opensearch/segrep/pressure.checkpoint.limit",
+ "opensearch/segrep/pressure_enabled": "opensearch/segrep/pressure.enabled",
+ "opensearch/segrep/pressure_replica_stale_limit": "opensearch/segrep/pressure.replica.stale.limit",
+ "opensearch/segrep/pressure_time_limit": "opensearch/segrep/pressure.time.limit",
},
"pg": {
"ip_filter_object": "ip_filter",
diff --git a/internal/sweep/sweep_test.go b/internal/sweep/sweep_test.go
index a6ba3cbdb..eccb1b763 100644
--- a/internal/sweep/sweep_test.go
+++ b/internal/sweep/sweep_test.go
@@ -50,6 +50,7 @@ func knownMissingSweepers() []string {
"aiven_kafka_native_acl",
"aiven_pg_database",
"aiven_kafka_user",
+ "aiven_kafka_quota",
"aiven_redis_user",
"aiven_valkey_user",
"aiven_opensearch_acl_config",