Skip to content

Commit

Permalink
feat(userconfigs): expand maps (#1707)
Browse files Browse the repository at this point in the history
  • Loading branch information
byashimov authored Apr 29, 2024
1 parent f22e4cf commit 22ce2dd
Show file tree
Hide file tree
Showing 19 changed files with 459 additions and 238 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@ nav_order: 1
- Fix incorrect timeout values used in certain cases
- Fix sending `aiven_kafka_topic` config default values
- Fix sending `false` values in `aiven_kafka_topic` config
- Fix `aiven_pg` user config fields with `__dot__` substring in name
- Validate `aiven_kafka_topic` topic name conflict on `terraform plan`
- Mark service connection info blocks as `sensitive`. See SDK [bug](https://github.com/hashicorp/terraform-plugin-sdk/issues/201).
- Remove redundant service connection info fields
Expand Down
6 changes: 3 additions & 3 deletions docs/resources/kafka.md
Original file line number Diff line number Diff line change
Expand Up @@ -147,7 +147,7 @@ Optional:
- `log_index_size_max_bytes` (Number) The maximum size in bytes of the offset index.
- `log_local_retention_bytes` (Number) The maximum size of local log segments that can grow for a partition before it gets eligible for deletion. If set to -2, the value of log.retention.bytes is used. The effective value should always be less than or equal to log.retention.bytes value.
- `log_local_retention_ms` (Number) The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
- `log_message_downconversion_enable` (Boolean) This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests. .
- `log_message_downconversion_enable` (Boolean) This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
- `log_message_timestamp_difference_max_ms` (Number) The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
- `log_message_timestamp_type` (String) Define whether the timestamp in the message is message create time or log append time.
- `log_preallocate` (Boolean) Should pre allocate file when create new segment?
Expand All @@ -169,7 +169,7 @@ Optional:
- `replica_fetch_response_max_bytes` (Number) Maximum bytes expected for the entire fetch response (defaults to 10485760). Records are fetched in batches, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that progress can be made. As such, this is not an absolute maximum.
- `sasl_oauthbearer_expected_audience` (String) The (optional) comma-delimited setting for the broker to use to verify that the JWT was issued for one of the expected audiences.
- `sasl_oauthbearer_expected_issuer` (String) Optional setting for the broker to use to verify that the JWT was created by the expected issuer.
- `sasl_oauthbearer_jwks_endpoint_url` (String) OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC. .
- `sasl_oauthbearer_jwks_endpoint_url` (String) OIDC JWKS endpoint URL. By setting this the SASL SSL OAuth2/OIDC authentication is enabled. See also other options for SASL OAuth2/OIDC.
- `sasl_oauthbearer_sub_claim_name` (String) Name of the scope from which to extract the subject claim from the JWT. Defaults to sub.
- `socket_request_max_bytes` (Number) The maximum number of bytes in a socket request (defaults to 104857600).
- `transaction_partition_verification_enable` (Boolean) Enable verification that checks that the partition has been added to the transaction before writing transactional records to the partition.
Expand All @@ -195,7 +195,7 @@ Optional:
- `consumer_auto_offset_reset` (String) What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- `consumer_fetch_max_bytes` (Number) Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- `consumer_isolation_level` (String) Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
- `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- `consumer_max_poll_interval_ms` (Number) The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- `consumer_max_poll_records` (Number) The maximum number of records returned in a single call to poll() (defaults to 500).
- `offset_flush_interval_ms` (Number) The interval at which to try committing offsets for tasks (defaults to 60000).
Expand Down
2 changes: 1 addition & 1 deletion docs/resources/kafka_connect.md
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ Optional:
- `consumer_auto_offset_reset` (String) What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
- `consumer_fetch_max_bytes` (Number) Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not an absolute maximum.
- `consumer_isolation_level` (String) Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
- `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress. .
- `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer. If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
- `consumer_max_poll_interval_ms` (Number) The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
- `consumer_max_poll_records` (Number) The maximum number of records returned in a single call to poll() (defaults to 500).
- `offset_flush_interval_ms` (Number) The interval at which to try committing offsets for tasks (defaults to 60000).
Expand Down
2 changes: 1 addition & 1 deletion docs/resources/pg.md
Original file line number Diff line number Diff line change
Expand Up @@ -223,7 +223,7 @@ Optional:
- `pg_partman_bgw__dot__interval` (Number) Sets the time interval to run pg_partman's scheduled tasks.
- `pg_partman_bgw__dot__role` (String) Controls which role to use for pg_partman's scheduled background tasks.
- `pg_stat_monitor__dot__pgsm_enable_query_plan` (Boolean) Enables or disables query plan monitoring.
- `pg_stat_monitor__dot__pgsm_max_buckets` (Number) Sets the maximum number of buckets .
- `pg_stat_monitor__dot__pgsm_max_buckets` (Number) Sets the maximum number of buckets.
- `pg_stat_statements__dot__track` (String) Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
- `temp_file_limit` (Number) PostgreSQL temporary file limit in KiB, -1 for unlimited.
- `timezone` (String) PostgreSQL service timezone.
Expand Down
4 changes: 2 additions & 2 deletions docs/resources/service_integration_endpoint.md
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ Optional:
Required:

- `project_id` (String) GCP project id.
- `service_account_credentials` (String) This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
- `service_account_credentials` (String) This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.


<a id="nestedblock--external_google_cloud_logging_user_config"></a>
Expand All @@ -128,7 +128,7 @@ Required:

- `log_id` (String) Google Cloud Logging log id.
- `project_id` (String) GCP project id.
- `service_account_credentials` (String) This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys .
- `service_account_credentials` (String) This is a JSON object with the fields documented in https://cloud.google.com/iam/docs/creating-managing-service-account-keys.


<a id="nestedblock--external_kafka_user_config"></a>
Expand Down
156 changes: 82 additions & 74 deletions internal/sdkprovider/userconfig/converters/converters.go
Original file line number Diff line number Diff line change
Expand Up @@ -66,6 +66,19 @@ func getUserConfig(kind userConfigType, name string) *schema.Schema {
return nil
}

// getFieldMapping returns the JSON-to-Terraform field name mapping for the
// given user config kind and name. The JSON field name may differ from the
// Terraform field name, so callers use this mapping to rename fields.
// Returns nil for an unknown kind.
func getFieldMapping(kind userConfigType, name string) map[string]string {
	switch kind {
	case ServiceUserConfig:
		return service.GetFieldMapping(name)
	case ServiceIntegrationUserConfig:
		return serviceintegration.GetFieldMapping(name)
	case ServiceIntegrationEndpointUserConfig:
		return serviceintegrationendpoint.GetFieldMapping(name)
	}
	return nil
}

// SetUserConfig sets user config schema for given kind and name
func SetUserConfig(kind userConfigType, name string, s map[string]*schema.Schema) {
userConfig := getUserConfig(kind, name)
Expand Down Expand Up @@ -112,7 +125,7 @@ func expand(kind userConfigType, name string, d *schema.ResourceData) (map[strin
}

// Renames ip_filter_object/string to ip_filter
renameAliases(dto)
renameAliasesToDto(kind, name, dto)

if v, ok := dto["ip_filter"].([]any); ok && len(v) == 0 {
if _, ok := os.LookupEnv(AllowIPFilterPurge); !ok {
Expand All @@ -134,21 +147,10 @@ type stateCompose struct {
key string // state attribute name or schema.ResourceData key
path string // schema.ResourceData path, i.e., foo.0.bar.0.baz to get the value
schema *schema.Schema // tf schema
config cty.Value // tf file values, it knows if resource value is null
config cty.Value // tf file state, it knows if resource value is null
resource *schema.ResourceData // tf resource that has both tf state and data that is received from the API
}

// setItems returns schema.Set values
func (s *stateCompose) setItems() ([]any, error) {
result := make([]any, 0)
if s.config.IsNull() {
// Makes possible to send ip_filter=[] to clear the remote list.
return result, nil
}

return s.value().(*schema.Set).List(), nil
}

// listItems returns a list of object's states
// Must not use it with scalar types, because "schema" expects to have Resource
func (s *stateCompose) listItems() (result []*stateCompose) {
Expand Down Expand Up @@ -196,6 +198,10 @@ func (s *stateCompose) objectProperties() map[string]*stateCompose {
}
return props
}
// valueType returns the Terraform schema value type of this attribute.
func (s *stateCompose) valueType() schema.ValueType {
	return s.schema.Type
}

// value returns the attribute's current value read from the resource data at s.path.
func (s *stateCompose) value() any {
	return s.resource.Get(s.path)
}
Expand All @@ -211,9 +217,9 @@ func (s *stateCompose) hasChange() bool {
return s.resource.HasChange(s.path)
}

func expandObj(state *stateCompose) (map[string]any, error) {
func expandObj(s *stateCompose) (map[string]any, error) {
m := make(map[string]any)
for k, v := range state.objectProperties() {
for k, v := range s.objectProperties() {
value, err := expandAttr(v)
if err != nil {
return nil, fmt.Errorf("%q field conversion error: %w", k, err)
Expand All @@ -225,35 +231,82 @@ func expandObj(state *stateCompose) (map[string]any, error) {
return m, nil
}

func expandScalar(state *stateCompose) (any, error) {
if state.isNull() {
func expandScalar(s *stateCompose) (any, error) {
if s.isNull() {
// Null scalar, no value in the config
return nil, nil
}
return state.value(), nil
return s.value(), nil
}

// expandAttr returns go value
func expandAttr(state *stateCompose) (any, error) {
switch state.schema.Type {
func expandAttr(s *stateCompose) (any, error) {
// Scalar values
switch s.valueType() {
case schema.TypeSet, schema.TypeList:
// See below
case schema.TypeMap:
return expandMap(s)
default:
return expandScalar(state)
return expandScalar(s)
}

if state.isNull() && !state.hasChange() {
// Here go schema.TypeMap, schema.TypeSet, schema.TypeList
if s.isNull() && !s.hasChange() {
// A value that hasn't been sent by user yet.
// But have been received from the API.
return nil, nil
}

if state.schema.Type == schema.TypeSet {
return state.setItems()
if s.valueType() == schema.TypeSet {
return expandSet(s)
}

return expandList(s)
}

// expandMap converts a schema.TypeMap attribute into a value for the DTO.
// It must return "any" (not a typed map) so the caller can discern "nil"
// (field absent from the config: skip it) from an empty map (send it as-is).
// Only the keys present in the Terraform config are copied into the result.
func expandMap(s *stateCompose) (any, error) {
	if s.config.IsNull() {
		// Not set in the config at all: skip the field.
		return nil, nil
	}

	value := s.value()
	m, ok := value.(map[string]any)
	if !ok {
		return nil, fmt.Errorf("%q expected to be map, but got %T", s.path, value)
	}

	// Sends map fields which are in config
	result := make(map[string]any)
	for k, v := range s.config.AsValueMap() {
		if !v.IsNull() {
			result[k] = m[k]
		}
	}
	return result, nil
}

// expandSet converts a schema.TypeSet attribute into a list of its elements.
// If the value was removed from the config, it sends an empty (non-nil) set.
// Warning: can't handle nested (complex) objects.
// Use schema.TypeList instead.
func expandSet(s *stateCompose) ([]any, error) {
	result := make([]any, 0)
	if s.config.IsNull() {
		// Makes possible to send ip_filter=[] to clear the remote list.
		return result, nil
	}

	return s.value().(*schema.Set).List(), nil
}

// expandList returns a list of elements or a single object,
// because in TF an object is a list with a single element
func expandList(s *stateCompose) (any, error) {
// schema.TypeList
_, isObjList := state.schema.Elem.(*schema.Resource)
states := state.listItems()
_, isObjList := s.schema.Elem.(*schema.Resource)
states := s.listItems()
items := make([]any, 0, len(states))
for i := range states {
var exp any
Expand All @@ -275,7 +328,7 @@ func expandAttr(state *stateCompose) (any, error) {
}

// If schema.TypeList && MaxItems == 1, then it is an object
if isObjList && state.schema.MaxItems == 1 {
if isObjList && s.schema.MaxItems == 1 {
switch len(items) {
case 1:
// A plain object (in TF a list with one object is an object)
Expand Down Expand Up @@ -309,15 +362,8 @@ func flatten(kind userConfigType, name string, d *schema.ResourceData, dto map[s
key := userConfigKey(kind, name)
prefix := fmt.Sprintf("%s.0.", key)

// Renames ip_filter field
if _, ok := dto["ip_filter"]; ok {
assignAlias(d, prefix+"ip_filter", dto, "ip_filter")
}

// Renames namespaces field
if mapping, ok := drillKey(dto, "rules.0.mapping"); ok {
assignAlias(d, prefix+"rules.0.mapping.0.namespaces", mapping.(map[string]any), "namespaces")
}
// Renames ip_filter to ip_filter_object
renameAliasesToTfo(kind, name, dto, d)

// Copies "create only" fields from the original config.
// Like admin_password, that is received only on POST request when service is created.
Expand Down Expand Up @@ -433,48 +479,10 @@ func flattenList(s map[string]*schema.Schema, list []any) ([]any, error) {
return items, nil
}

// assignAlias renames keys for multi-typed properties, i.e. ip_filter -> [ip_filter_string, ip_filter_object]
func assignAlias(d *schema.ResourceData, path string, dto map[string]any, key string) {
values, ok := dto[key].([]any)
if !ok || len(values) == 0 {
return
}

var suffix string
const (
str = "_string"
obj = "_object"
)

// If DTO has objects, then it is foo_object
if _, ok := values[0].(map[string]any); ok {
suffix = obj
}

// If the state has foo_string, the user has new key
if _, ok := d.GetOk(path + str); ok {
suffix = str
}

if suffix != "" {
dto[key+suffix] = dto[key]
delete(dto, key)
}
}

// createOnlyFields these fields are received on POST request only
func createOnlyFields() []string {
return []string{
"admin_username",
"admin_password",
}
}

func aliasFieldsMap() map[string]string {
return map[string]string{
"ip_filter_string": "ip_filter",
"ip_filter_object": "ip_filter",
"rules.0.mapping.0.namespaces_string": "namespaces",
"rules.0.mapping.0.namespaces_object": "namespaces",
}
}
Loading

0 comments on commit 22ce2dd

Please sign in to comment.