diff --git a/docs/resources/cassandra.md b/docs/resources/cassandra.md
index 45dc77e61..3ba3877e6 100644
--- a/docs/resources/cassandra.md
+++ b/docs/resources/cassandra.md
@@ -90,7 +90,7 @@ Optional:
 - `backup_hour` (Number) The hour of day (in UTC) when backup for the service is started. New backup is only started if previous backup has already completed.
 - `backup_minute` (Number) The minute of an hour when backup for the service is started. New backup is only started if previous backup has already completed.
 - `cassandra` (Block List, Max: 1) Cassandra configuration values (see [below for nested schema](#nestedblock--cassandra_user_config--cassandra))
-- `cassandra_version` (String) Cassandra version.
+- `cassandra_version` (String) Enum: `3`, `4`, `4.1`. Cassandra version.
 - `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 - `ip_filter_object` (Block Set, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--cassandra_user_config--ip_filter_object))
 - `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
diff --git a/docs/resources/dragonfly.md b/docs/resources/dragonfly.md
index 79cc02a09..9b48b752d 100644
--- a/docs/resources/dragonfly.md
+++ b/docs/resources/dragonfly.md
@@ -87,7 +87,7 @@ Read-Only:
 Optional:
 
 - `cache_mode` (Boolean) Evict entries when getting close to maxmemory limit. The default value is `false`.
-- `dragonfly_persistence` (String) When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
+- `dragonfly_persistence` (String) Enum: `off`, `rdb`. When persistence is 'rdb', Dragonfly does RDB dumps each 10 minutes. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
 - `dragonfly_ssl` (Boolean) Require SSL to access Dragonfly. The default value is `true`.
 - `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 - `ip_filter_object` (Block Set, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--dragonfly_user_config--ip_filter_object))
@@ -126,7 +126,7 @@ Optional:
 
 - `dbname` (String) Database name for bootstrapping the initial connection.
 - `ignore_dbs` (String) Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-- `method` (String) The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
+- `method` (String) Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
 - `password` (String, Sensitive) Password for authentication with the server where to migrate data from.
 - `ssl` (Boolean) The server where to migrate data from is secured with SSL. The default value is `true`.
 - `username` (String) User name for authentication with the server where to migrate data from.
diff --git a/docs/resources/flink.md b/docs/resources/flink.md
index 749796f88..4220ec18c 100644
--- a/docs/resources/flink.md
+++ b/docs/resources/flink.md
@@ -83,7 +83,7 @@ Optional:
 Optional:
 
 - `additional_backup_regions` (List of String, Deprecated) Additional Cloud Regions for Backup Replication.
-- `flink_version` (String) Flink major version.
+- `flink_version` (String) Enum: `1.16`. Flink major version.
 - `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 - `ip_filter_object` (Block Set, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--flink_user_config--ip_filter_object))
 - `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
diff --git a/docs/resources/grafana.md b/docs/resources/grafana.md
index ced385b36..7e904873b 100644
--- a/docs/resources/grafana.md
+++ b/docs/resources/grafana.md
@@ -88,9 +88,9 @@ Optional:
 
 - `additional_backup_regions` (List of String) Additional Cloud Regions for Backup Replication.
 - `alerting_enabled` (Boolean) Enable or disable Grafana legacy alerting functionality. This should not be enabled with unified_alerting_enabled.
-- `alerting_error_or_timeout` (String) Default error or timeout setting for new alerting rules.
+- `alerting_error_or_timeout` (String) Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.
 - `alerting_max_annotations_to_keep` (Number) Max number of alert annotations that Grafana stores. 0 (default) keeps all alert annotations.
-- `alerting_nodata_or_nullvalues` (String) Default value for 'no data or null values' for new alerting rules.
+- `alerting_nodata_or_nullvalues` (String) Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.
 - `allow_embedding` (Boolean) Allow embedding Grafana dashboards with iframe/frame/object/embed tags. Disabled by default to limit impact of clickjacking.
 - `auth_azuread` (Block List, Max: 1) Azure AD OAuth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_azuread))
 - `auth_basic_enabled` (Boolean) Enable or disable basic authentication form, used by Grafana built-in login.
@@ -98,7 +98,7 @@ Optional:
 - `auth_github` (Block List, Max: 1) Github Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_github))
 - `auth_gitlab` (Block List, Max: 1) GitLab Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_gitlab))
 - `auth_google` (Block List, Max: 1) Google Auth integration (see [below for nested schema](#nestedblock--grafana_user_config--auth_google))
-- `cookie_samesite` (String) Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
+- `cookie_samesite` (String) Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.
 - `custom_domain` (String) Serve the web frontend using a custom CNAME pointing to the Aiven DNS name.
 - `dashboard_previews_enabled` (Boolean) This feature is new in Grafana 9 and is quite resource intensive. It may cause low-end plans to work more slowly while the dashboard previews are rendering.
 - `dashboards_min_refresh_interval` (String) Signed sequence of decimal numbers, followed by a unit suffix (ms, s, m, h, d), e.g. 30s, 1h.
@@ -126,7 +126,7 @@ Optional:
 - `static_ips` (Boolean) Use static public IP addresses.
 - `unified_alerting_enabled` (Boolean) Enable or disable Grafana unified alerting functionality. By default this is enabled and any legacy alerts will be migrated on upgrade to Grafana 9+. To stay on legacy alerting, set unified_alerting_enabled to false and alerting_enabled to true. See https://grafana.com/docs/grafana/latest/alerting/set-up/migrating-alerts/ for more details.
 - `user_auto_assign_org` (Boolean) Auto-assign new users on signup to main organization. Defaults to false.
-- `user_auto_assign_org_role` (String) Set role for new signups. Defaults to Viewer.
+- `user_auto_assign_org_role` (String) Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.
 - `viewers_can_edit` (Boolean) Users with view-only permission can edit but not save dashboards.
 
 
@@ -237,7 +237,7 @@ Required:
 
 - `access_key` (String) S3 access key. Requires permissions to the S3 bucket for the s3:PutObject and s3:PutObjectAcl actions.
 - `bucket_url` (String) Bucket URL for S3.
-- `provider` (String) Provider type.
+- `provider` (String) Enum: `s3`. Provider type.
 - `secret_key` (String) S3 secret key.
 
 
@@ -291,7 +291,7 @@ Optional:
 - `from_name` (String) Name used in outgoing emails, defaults to Grafana.
 - `password` (String, Sensitive) Password for SMTP authentication.
 - `skip_verify` (Boolean) Skip verifying server certificate. Defaults to false.
-- `starttls_policy` (String) Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
+- `starttls_policy` (String) Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.
 - `username` (String) Username for SMTP authentication.
diff --git a/docs/resources/kafka.md b/docs/resources/kafka.md
index 20990c4c8..9abeb0edf 100644
--- a/docs/resources/kafka.md
+++ b/docs/resources/kafka.md
@@ -118,7 +118,7 @@ Optional:
 - `kafka_rest` (Boolean) Enable Kafka-REST service. The default value is `false`.
 - `kafka_rest_authorization` (Boolean) Enable authorization in Kafka-REST service.
 - `kafka_rest_config` (Block List, Max: 1) Kafka REST configuration (see [below for nested schema](#nestedblock--kafka_user_config--kafka_rest_config))
-- `kafka_version` (String) Kafka major version.
+- `kafka_version` (String) Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`. Kafka major version.
 - `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--kafka_user_config--private_access))
 - `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--kafka_user_config--privatelink_access))
 - `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--kafka_user_config--public_access))
@@ -146,7 +146,7 @@ Optional:
 Optional:
 
 - `auto_create_topics_enable` (Boolean) Enable auto creation of topics.
-- `compression_type` (String) Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
+- `compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.
 - `connections_max_idle_ms` (Number) Idle connections timeout: the server socket processor threads close the connections that idle for longer than this.
 - `default_replication_factor` (Number) Replication factor for autocreated topics.
 - `group_initial_rebalance_delay_ms` (Number) The amount of time, in milliseconds, the group coordinator will wait for more consumers to join a new group before performing the first rebalance. A longer delay means potentially fewer rebalances, but increases the time until processing begins. The default value for this is 3 seconds. During development and testing it might be desirable to set this to 0 in order to not delay test execution time.
@@ -156,7 +156,7 @@ Optional:
 - `log_cleaner_max_compaction_lag_ms` (Number) The maximum amount of time message will remain uncompacted. Only applicable for logs that are being compacted.
 - `log_cleaner_min_cleanable_ratio` (Number) Controls log compactor frequency. Larger value means more frequent compactions but also more space wasted for logs. Consider setting log.cleaner.max.compaction.lag.ms to enforce compactions sooner, instead of setting a very high value for this option.
 - `log_cleaner_min_compaction_lag_ms` (Number) The minimum time a message will remain uncompacted in the log. Only applicable for logs that are being compacted.
-- `log_cleanup_policy` (String) The default cleanup policy for segments beyond the retention window.
+- `log_cleanup_policy` (String) Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.
 - `log_flush_interval_messages` (Number) The number of messages accumulated on a log partition before messages are flushed to disk.
 - `log_flush_interval_ms` (Number) The maximum time in ms that a message in any topic is kept in memory before flushed to disk. If not set, the value in log.flush.scheduler.interval.ms is used.
 - `log_index_interval_bytes` (Number) The interval with which Kafka adds an entry to the offset index.
@@ -165,7 +165,7 @@ Optional:
 - `log_local_retention_ms` (Number) The number of milliseconds to keep the local log segments before it gets eligible for deletion. If set to -2, the value of log.retention.ms is used. The effective value should always be less than or equal to log.retention.ms value.
 - `log_message_downconversion_enable` (Boolean) This configuration controls whether down-conversion of message formats is enabled to satisfy consume requests.
 - `log_message_timestamp_difference_max_ms` (Number) The maximum difference allowed between the timestamp when a broker receives a message and the timestamp specified in the message.
-- `log_message_timestamp_type` (String) Define whether the timestamp in the message is message create time or log append time.
+- `log_message_timestamp_type` (String) Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.
 - `log_preallocate` (Boolean) Should pre allocate file when create new segment?
 - `log_retention_bytes` (Number) The maximum size of the log before deleting messages.
 - `log_retention_hours` (Number) The number of hours to keep a log file before deleting it.
@@ -207,10 +207,10 @@ Optional:
 
 Optional:
 
-- `connector_client_config_override_policy` (String) Defines what client configurations can be overridden by the connector. Default is None.
-- `consumer_auto_offset_reset` (String) What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
+- `connector_client_config_override_policy` (String) Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
+- `consumer_auto_offset_reset` (String) Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
 - `consumer_fetch_max_bytes` (Number) Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
-- `consumer_isolation_level` (String) Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
+- `consumer_isolation_level` (String) Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
 - `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
 - `consumer_max_poll_interval_ms` (Number) The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
 - `consumer_max_poll_records` (Number) The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -218,7 +218,7 @@ Optional:
 - `offset_flush_timeout_ms` (Number) Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
 - `producer_batch_size` (Number) This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
 - `producer_buffer_memory` (Number) The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-- `producer_compression_type` (String) Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
+- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 - `producer_linger_ms` (Number) This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
 - `producer_max_request_size` (Number) This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
 - `scheduled_rebalance_max_delay_ms` (Number) The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
@@ -232,11 +232,11 @@ Optional:
 
 - `consumer_enable_auto_commit` (Boolean) If true the consumer's offset will be periodically committed to Kafka in the background. The default value is `true`.
 - `consumer_request_max_bytes` (Number) Maximum number of bytes in unencoded message keys and values by a single request. The default value is `67108864`.
-- `consumer_request_timeout_ms` (Number) The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
-- `name_strategy` (String) Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
+- `consumer_request_timeout_ms` (Number) Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.
+- `name_strategy` (String) Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.
 - `name_strategy_validation` (Boolean) If true, validate that given schema is registered under expected subject name by the used name strategy when producing messages. The default value is `true`.
-- `producer_acks` (String) The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
-- `producer_compression_type` (String) Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
+- `producer_acks` (String) Enum: `all`, `-1`, `0`, `1`. The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.
+- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 - `producer_linger_ms` (Number) Wait for up to the given delay to allow batching records together. The default value is `0`.
 - `producer_max_request_size` (Number) The maximum size of a request in bytes. Note that Kafka broker can also cap the record batch size. The default value is `1048576`.
 - `simpleconsumer_pool_size_max` (Number) Maximum number of SimpleConsumers that can be instantiated per broker. The default value is `25`.
diff --git a/docs/resources/kafka_connect.md b/docs/resources/kafka_connect.md
index 1548e8a6d..beab3e2e8 100644
--- a/docs/resources/kafka_connect.md
+++ b/docs/resources/kafka_connect.md
@@ -107,10 +107,10 @@ Optional:
 
 Optional:
 
-- `connector_client_config_override_policy` (String) Defines what client configurations can be overridden by the connector. Default is None.
-- `consumer_auto_offset_reset` (String) What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
+- `connector_client_config_override_policy` (String) Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.
+- `consumer_auto_offset_reset` (String) Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.
 - `consumer_fetch_max_bytes` (Number) Records are fetched in batches by the consumer, and if the first record batch in the first non-empty partition of the fetch is larger than this value, the record batch will still be returned to ensure that the consumer can make progress. As such, this is not a absolute maximum.
-- `consumer_isolation_level` (String) Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
+- `consumer_isolation_level` (String) Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.
 - `consumer_max_partition_fetch_bytes` (Number) Records are fetched in batches by the consumer.If the first record batch in the first non-empty partition of the fetch is larger than this limit, the batch will still be returned to ensure that the consumer can make progress.
 - `consumer_max_poll_interval_ms` (Number) The maximum delay in milliseconds between invocations of poll() when using consumer group management (defaults to 300000).
 - `consumer_max_poll_records` (Number) The maximum number of records returned in a single call to poll() (defaults to 500).
@@ -118,7 +118,7 @@ Optional:
 - `offset_flush_timeout_ms` (Number) Maximum number of milliseconds to wait for records to flush and partition offset data to be committed to offset storage before cancelling the process and restoring the offset data to be committed in a future attempt (defaults to 5000).
 - `producer_batch_size` (Number) This setting gives the upper bound of the batch size to be sent. If there are fewer than this many bytes accumulated for this partition, the producer will 'linger' for the linger.ms time waiting for more records to show up. A batch size of zero will disable batching entirely (defaults to 16384).
 - `producer_buffer_memory` (Number) The total bytes of memory the producer can use to buffer records waiting to be sent to the broker (defaults to 33554432).
-- `producer_compression_type` (String) Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
+- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 - `producer_linger_ms` (Number) This setting gives the upper bound on the delay for batching: once there is batch.size worth of records for a partition it will be sent immediately regardless of this setting, however if there are fewer than this many bytes accumulated for this partition the producer will 'linger' for the specified time waiting for more records to show up. Defaults to 0.
 - `producer_max_request_size` (Number) This setting will limit the number of record batches the producer will send in a single request to avoid sending huge requests.
 - `scheduled_rebalance_max_delay_ms` (Number) The maximum delay that is scheduled in order to wait for the return of one or more departed workers before rebalancing and reassigning their connectors and tasks to the group. During this period the connectors and tasks of the departed workers remain unassigned. Defaults to 5 minutes.
diff --git a/docs/resources/m3aggregator.md b/docs/resources/m3aggregator.md
index 60722f558..d35773dce 100644
--- a/docs/resources/m3aggregator.md
+++ b/docs/resources/m3aggregator.md
@@ -90,8 +90,8 @@ Optional:
 - `ip_filter` (Set of String, Deprecated) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 - `ip_filter_object` (Block Set, Max: 1024) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16' (see [below for nested schema](#nestedblock--m3aggregator_user_config--ip_filter_object))
 - `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
-- `m3_version` (String) M3 major version (deprecated, use m3aggregator_version).
-- `m3aggregator_version` (String) M3 major version (the minimum compatible version).
+- `m3_version` (String) Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3aggregator_version).
+- `m3aggregator_version` (String) Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
 - `service_log` (Boolean) Store logs for the service so that they are available in the HTTP API and console.
 - `static_ips` (Boolean) Use static public IP addresses.
diff --git a/docs/resources/m3db.md b/docs/resources/m3db.md
index 59efd1824..51583a08e 100644
--- a/docs/resources/m3db.md
+++ b/docs/resources/m3db.md
@@ -102,9 +102,9 @@ Optional:
 - `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 - `limits` (Block List, Max: 1) M3 limits (see [below for nested schema](#nestedblock--m3db_user_config--limits))
 - `m3` (Block List, Max: 1) M3 specific configuration options (see [below for nested schema](#nestedblock--m3db_user_config--m3))
-- `m3_version` (String) M3 major version (deprecated, use m3db_version).
+- `m3_version` (String) Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3db_version).
 - `m3coordinator_enable_graphite_carbon_ingest` (Boolean) Enables access to Graphite Carbon plaintext metrics ingestion. It can be enabled only for services inside VPCs. The metrics are written to aggregated namespaces only.
-- `m3db_version` (String) M3 major version (the minimum compatible version).
+- `m3db_version` (String) Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).
 - `namespaces` (Block List, Max: 2147483647) List of M3 namespaces (see [below for nested schema](#nestedblock--m3db_user_config--namespaces))
 - `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--m3db_user_config--private_access))
 - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -162,7 +162,7 @@ Optional:
 
 Required:
 
 - `name` (String) The name of the namespace.
-- `type` (String) The type of aggregation (aggregated/unaggregated).
+- `type` (String) Enum: `aggregated`, `unaggregated`. The type of aggregation (aggregated/unaggregated).
 
 Optional:
diff --git a/docs/resources/mysql.md b/docs/resources/mysql.md
index 70c289367..3d9a36f15 100644
--- a/docs/resources/mysql.md
+++ b/docs/resources/mysql.md
@@ -122,7 +122,7 @@ Optional:
 - `ip_filter_string` (Set of String) Allow incoming connections from CIDR address block, e.g. '10.20.0.0/16'.
 - `migration` (Block List, Max: 1) Migrate data from existing server (see [below for nested schema](#nestedblock--mysql_user_config--migration))
 - `mysql` (Block List, Max: 1) mysql.conf configuration values (see [below for nested schema](#nestedblock--mysql_user_config--mysql))
-- `mysql_version` (String) MySQL major version.
+- `mysql_version` (String) Enum: `8`. MySQL major version.
 - `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--mysql_user_config--private_access))
 - `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--mysql_user_config--privatelink_access))
 - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -156,7 +156,7 @@ Optional:
 
 - `dbname` (String) Database name for bootstrapping the initial connection.
 - `ignore_dbs` (String) Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-- `method` (String) The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
+- `method` (String) Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
 - `password` (String, Sensitive) Password for authentication with the server where to migrate data from.
 - `ssl` (Boolean) The server where to migrate data from is secured with SSL. The default value is `true`.
 - `username` (String) User name for authentication with the server where to migrate data from.
@@ -184,7 +184,7 @@ Optional:
 - `innodb_thread_concurrency` (Number) Defines the maximum number of threads permitted inside of InnoDB. Default is 0 (infinite concurrency - no limit).
 - `innodb_write_io_threads` (Number) The number of I/O threads for write operations in InnoDB. Default is 4. Changing this parameter will lead to a restart of the MySQL service.
 - `interactive_timeout` (Number) The number of seconds the server waits for activity on an interactive connection before closing it.
-- `internal_tmp_mem_storage_engine` (String) The storage engine for in-memory internal temporary tables.
+- `internal_tmp_mem_storage_engine` (String) Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.
 - `long_query_time` (Number) The slow_query_logs work as SQL statements that take more than long_query_time seconds to execute. Default is 10s.
 - `max_allowed_packet` (Number) Size of the largest message in bytes that can be received by the server. Default is 67108864 (64M).
 - `max_heap_table_size` (Number) Limits the size of internal in-memory tables. Also set tmp_table_size. Default is 16777216 (16M).
diff --git a/docs/resources/opensearch.md b/docs/resources/opensearch.md
index 01d688725..8fe477483 100644
--- a/docs/resources/opensearch.md
+++ b/docs/resources/opensearch.md
@@ -112,7 +112,7 @@ Optional:
 - `openid` (Block List, Max: 1) OpenSearch OpenID Connect Configuration (see [below for nested schema](#nestedblock--opensearch_user_config--openid))
 - `opensearch` (Block List, Max: 1) OpenSearch settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch))
 - `opensearch_dashboards` (Block List, Max: 1) OpenSearch Dashboards settings (see [below for nested schema](#nestedblock--opensearch_user_config--opensearch_dashboards))
-- `opensearch_version` (String) OpenSearch major version.
+- `opensearch_version` (String) Enum: `1`, `2`. OpenSearch major version.
 - `private_access` (Block List, Max: 1) Allow access to selected service ports from private networks (see [below for nested schema](#nestedblock--opensearch_user_config--private_access))
 - `privatelink_access` (Block List, Max: 1) Allow access to selected service components through Privatelink (see [below for nested schema](#nestedblock--opensearch_user_config--privatelink_access))
 - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created.
@@ -133,7 +133,7 @@ Required:
 
 Optional:
 
-- `sorting_algorithm` (String) Deletion sorting algorithm. The default value is `creation_date`.
+- `sorting_algorithm` (String) Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.
 
 
@@ -242,12 +242,12 @@ Optional:
 
 Optional:
 
 - `allowed_tries` (Number) The number of login attempts allowed before login is blocked.
-- `authentication_backend` (String) internal_authentication_backend_limiting.authentication_backend.
+- `authentication_backend` (String) Enum: `internal`. internal_authentication_backend_limiting.authentication_backend.
 - `block_expiry_seconds` (Number) The duration of time that login remains blocked after a failed login.
 - `max_blocked_clients` (Number) internal_authentication_backend_limiting.max_blocked_clients.
 - `max_tracked_clients` (Number) The maximum number of tracked IP addresses that have failed login.
 - `time_window_seconds` (Number) The window of time in which the value for `allowed_tries` is enforced.
-- `type` (String) internal_authentication_backend_limiting.type.
+- `type` (String) Enum: `username`. internal_authentication_backend_limiting.type.
 
 
@@ -260,7 +260,7 @@ Optional:
 - `max_blocked_clients` (Number) The maximum number of blocked IP addresses.
 - `max_tracked_clients` (Number) The maximum number of tracked IP addresses that have failed login.
 - `time_window_seconds` (Number) The window of time in which the value for `allowed_tries` is enforced.
-- `type` (String) The type of rate limiting.
+- `type` (String) Enum: `ip`. The type of rate limiting.
 
 
diff --git a/docs/resources/pg.md b/docs/resources/pg.md
index 71e1add91..7de51f181 100644
--- a/docs/resources/pg.md
+++ b/docs/resources/pg.md
@@ -149,7 +149,7 @@ Optional:
 - `pg_read_replica` (Boolean) Should the service which is being forked be a read replica (deprecated, use read_replica service integration instead).
 - `pg_service_to_fork_from` (String) Name of the PG Service from which to fork (deprecated, use service_to_fork_from). This has effect only when a new service is being created.
 - `pg_stat_monitor_enable` (Boolean) Enable the pg_stat_monitor extension. Enabling this extension will cause the cluster to be restarted.When this extension is enabled, pg_stat_statements results for utility commands are unreliable. The default value is `false`.
-- `pg_version` (String) PostgreSQL major version.
+- `pg_version` (String) Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`. PostgreSQL major version.
 - `pgaudit` (Block List, Max: 1) System-wide settings for the pgaudit extension (see [below for nested schema](#nestedblock--pg_user_config--pgaudit))
 - `pgbouncer` (Block List, Max: 1) PGBouncer connection pooling settings (see [below for nested schema](#nestedblock--pg_user_config--pgbouncer))
 - `pglookout` (Block List, Max: 1) System-wide settings for pglookout (see [below for nested schema](#nestedblock--pg_user_config--pglookout))
@@ -162,9 +162,9 @@ Optional:
 - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created.
 - `shared_buffers_percentage` (Number) Percentage of total RAM that the database server uses for shared memory buffers. Valid range is 20-60 (float), which corresponds to 20% - 60%. This setting adjusts the shared_buffers configuration value.
 - `static_ips` (Boolean) Use static public IP addresses.
-- `synchronous_replication` (String) Synchronous replication type. Note that the service plan also needs to support synchronous replication.
+- `synchronous_replication` (String) Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.
 - `timescaledb` (Block List, Max: 1) System-wide settings for the timescaledb extension (see [below for nested schema](#nestedblock--pg_user_config--timescaledb))
-- `variant` (String) Variant of the PostgreSQL service, may affect the features that are exposed by default.
+- `variant` (String) Enum: `aiven`, `timescale`. Variant of the PostgreSQL service, may affect the features that are exposed by default.
 - `work_mem` (Number) Sets the maximum amount of memory to be used by a query operation (such as a sort or hash table) before writing to temporary disk files, in MB. Default is 1MB + 0.075% of total RAM (up to 32MB).
 
 
@@ -191,7 +191,7 @@ Optional:
 
 - `dbname` (String) Database name for bootstrapping the initial connection.
 - `ignore_dbs` (String) Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-- `method` (String) The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
+- `method` (String) Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
 - `password` (String, Sensitive) Password for authentication with the server where to migrate data from.
 - `ssl` (Boolean) The server where to migrate data from is secured with SSL. The default value is `true`.
 - `username` (String) User name for authentication with the server where to migrate data from.
@@ -216,12 +216,12 @@ Optional:
 - `bgwriter_lru_maxpages` (Number) In each round, no more than this many buffers will be written by the background writer. Setting this to zero disables background writing. Default is 100.
 - `bgwriter_lru_multiplier` (Number) The average recent need for new buffers is multiplied by bgwriter_lru_multiplier to arrive at an estimate of the number that will be needed during the next round, (up to bgwriter_lru_maxpages). 1.0 represents a “just in time” policy of writing exactly the number of buffers predicted to be needed. Larger values provide some cushion against spikes in demand, while smaller values intentionally leave writes to be done by server processes. The default is 2.0.
 - `deadlock_timeout` (Number) This is the amount of time, in milliseconds, to wait on a lock before checking to see if there is a deadlock condition.
-- `default_toast_compression` (String) Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
+- `default_toast_compression` (String) Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).
 - `idle_in_transaction_session_timeout` (Number) Time out sessions with open transactions after this number of milliseconds.
 - `jit` (Boolean) Controls system-wide use of Just-in-Time Compilation (JIT).
 - `log_autovacuum_min_duration` (Number) Causes each action executed by autovacuum to be logged if it ran for at least the specified number of milliseconds. Setting this to zero logs all autovacuum actions. Minus-one (the default) disables logging autovacuum actions.
-- `log_error_verbosity` (String) Controls the amount of detail written in the server log for each message that is logged.
-- `log_line_prefix` (String) Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
+- `log_error_verbosity` (String) Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.
+- `log_line_prefix` (String) Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.
 - `log_min_duration_statement` (Number) Log statements that take more than this number of milliseconds to run, -1 disables.
 - `log_temp_files` (Number) Log statements for each temporary file created larger than this number of kilobytes, -1 disables.
 - `max_files_per_process` (Number) PostgreSQL maximum number of files that can be open per process.
@@ -242,13 +242,13 @@ Optional:
 - `pg_partman_bgw__dot__role` (String) Controls which role to use for pg_partman's scheduled background tasks.
 - `pg_stat_monitor__dot__pgsm_enable_query_plan` (Boolean) Enables or disables query plan monitoring.
 - `pg_stat_monitor__dot__pgsm_max_buckets` (Number) Sets the maximum number of buckets.
-- `pg_stat_statements__dot__track` (String) Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
+- `pg_stat_statements__dot__track` (String) Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.
 - `temp_file_limit` (Number) PostgreSQL temporary file limit in KiB, -1 for unlimited.
 - `timezone` (String) PostgreSQL service timezone.
 - `track_activity_query_size` (Number) Specifies the number of bytes reserved to track the currently executing command for each active session.
-- `track_commit_timestamp` (String) Record commit time of transactions.
-- `track_functions` (String) Enables tracking of function call counts and time used.
-- `track_io_timing` (String) Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
+- `track_commit_timestamp` (String) Enum: `off`, `on`. Record commit time of transactions.
+- `track_functions` (String) Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.
+- `track_io_timing` (String) Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.
 - `wal_sender_timeout` (Number) Terminate replication connections that are inactive for longer than this amount of time, in milliseconds. Setting this value to zero disables the timeout.
 - `wal_writer_delay` (Number) WAL flush interval in milliseconds. Note that setting this value to lower than the default 200ms may negatively impact performance.
@@ -274,7 +274,7 @@ Optional:
 - `log` (List of String) Specifies which classes of statements will be logged by session audit logging.
 - `log_catalog` (Boolean) Specifies that session logging should be enabled in the casewhere all relations in a statement are in pg_catalog. The default value is `true`.
 - `log_client` (Boolean) Specifies whether log messages will be visible to a client process such as psql. The default value is `false`.
-- `log_level` (String) Specifies the log level that will be used for log entries. The default value is `log`.
+- `log_level` (String) Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.
 - `log_max_string_length` (Number) Crop parameters representation and whole statements if they exceed this threshold. A (default) value of -1 disable the truncation. The default value is `-1`.
 - `log_nested_statements` (Boolean) This GUC allows to turn off logging nested statements, that is, statements that are executed as part of another ExecutorRun. The default value is `true`.
 - `log_parameter` (Boolean) Specifies that audit logging should include the parameters that were passed with the statement. The default value is `false`.
@@ -293,7 +293,7 @@ Optional:
 - `autodb_idle_timeout` (Number) If the automatically created database pools have been unused this many seconds, they are freed. If 0 then timeout is disabled. (seconds). The default value is `3600`.
 - `autodb_max_db_connections` (Number) Do not allow more than this many server connections per database (regardless of user). Setting it to 0 means unlimited.
-- `autodb_pool_mode` (String) PGBouncer pool mode. The default value is `transaction`.
+- `autodb_pool_mode` (String) Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.
 - `autodb_pool_size` (Number) If non-zero then create automatically a pool of that size per user when a pool doesn't exist. The default value is `0`.
 - `ignore_startup_parameters` (List of String) List of parameters to ignore when given in startup packet.
 - `min_pool_size` (Number) Add more server connections to pool if below this number. Improves behavior when usual load comes suddenly back after period of total inactivity. The value is effectively capped at the pool size. The default value is `0`.
diff --git a/docs/resources/redis.md b/docs/resources/redis.md
index 08e01200f..7d3c11b65 100644
--- a/docs/resources/redis.md
+++ b/docs/resources/redis.md
@@ -102,18 +102,18 @@ Optional:
 - `project_to_fork_from` (String) Name of another project to fork a service from. This has effect only when a new service is being created.
 - `public_access` (Block List, Max: 1) Allow access to selected service ports from the public Internet (see [below for nested schema](#nestedblock--redis_user_config--public_access))
 - `recovery_basebackup_name` (String) Name of the basebackup to restore in forked service.
-- `redis_acl_channels_default` (String) Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
+- `redis_acl_channels_default` (String) Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.
 - `redis_io_threads` (Number) Set Redis IO thread count. Changing this will cause a restart of the Redis service.
 - `redis_lfu_decay_time` (Number) LFU maxmemory-policy counter decay time in minutes. The default value is `1`.
 - `redis_lfu_log_factor` (Number) Counter logarithm factor for volatile-lfu and allkeys-lfu maxmemory-policies. The default value is `10`.
-- `redis_maxmemory_policy` (String) Redis maxmemory-policy. The default value is `noeviction`.
+- `redis_maxmemory_policy` (String) Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.
 - `redis_notify_keyspace_events` (String) Set notify-keyspace-events option.
 - `redis_number_of_databases` (Number) Set number of Redis databases. Changing this will cause a restart of the Redis service.
-- `redis_persistence` (String) When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
+- `redis_persistence` (String) Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.
 - `redis_pubsub_client_output_buffer_limit` (Number) Set output buffer limit for pub / sub clients in MB. The value is the hard limit, the soft limit is 1/4 of the hard limit. When setting the limit, be mindful of the available memory in the selected service plan.
 - `redis_ssl` (Boolean) Require SSL to access Redis. The default value is `true`.
 - `redis_timeout` (Number) Redis idle connection timeout in seconds. The default value is `300`.
-- `redis_version` (String) Redis major version.
+- `redis_version` (String) Enum: `7.0`. Redis major version.
 - `service_log` (Boolean) Store logs for the service so that they are available in the HTTP API and console.
 - `service_to_fork_from` (String) Name of another service to fork from. This has effect only when a new service is being created.
 - `static_ips` (Boolean) Use static public IP addresses.
@@ -142,7 +142,7 @@ Optional:
 
 - `dbname` (String) Database name for bootstrapping the initial connection.
 - `ignore_dbs` (String) Comma-separated list of databases, which should be ignored during migration (supported by MySQL and PostgreSQL only at the moment).
-- `method` (String) The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
+- `method` (String) Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).
 - `password` (String, Sensitive) Password for authentication with the server where to migrate data from.
 - `ssl` (Boolean) The server where to migrate data from is secured with SSL. The default value is `true`.
 - `username` (String) User name for authentication with the server where to migrate data from.
diff --git a/docs/resources/service_integration.md b/docs/resources/service_integration.md
index ce230a095..e66aead95 100644
--- a/docs/resources/service_integration.md
+++ b/docs/resources/service_integration.md
@@ -70,16 +70,16 @@ Optional:
 Required:
 
 - `columns` (Block List, Min: 1, Max: 100) Table columns (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--columns))
-- `data_format` (String) Message data format. The default value is `JSONEachRow`.
+- `data_format` (String) Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.
 - `group_name` (String) Kafka consumers group. The default value is `clickhouse`.
 - `name` (String) Name of the table.
 - `topics` (Block List, Min: 1, Max: 100) Kafka topics (see [below for nested schema](#nestedblock--clickhouse_kafka_user_config--tables--topics))
 
 Optional:
 
-- `auto_offset_reset` (String) Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
-- `date_time_input_format` (String) Method to read DateTime from text input formats. The default value is `basic`.
-- `handle_error_mode` (String) How to handle errors for Kafka engine. The default value is `default`.
+- `auto_offset_reset` (String) Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.
+- `date_time_input_format` (String) Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.
+- `handle_error_mode` (String) Enum: `default`, `stream`. How to handle errors for Kafka engine. The default value is `default`.
 - `max_block_size` (Number) Number of row collected by poll(s) for flushing data from Kafka. The default value is `0`.
 - `max_rows_per_message` (Number) The maximum number of rows produced in one kafka message for row-based formats. The default value is `1`.
 - `num_consumers` (Number) The number of consumers per table per replica. The default value is `1`.
@@ -269,7 +269,7 @@ Optional:
 - `consumer_fetch_min_bytes` (Number) The minimum amount of data the server should return for a fetch request.
 - `producer_batch_size` (Number) The batch size in bytes producer will attempt to collect before publishing to broker.
 - `producer_buffer_memory` (Number) The amount of bytes producer can use for buffering data before publishing to broker.
-- `producer_compression_type` (String) Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
+- `producer_compression_type` (String) Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.
 - `producer_linger_ms` (Number) The linger time (ms) for waiting new data to arrive for publishing.
 - `producer_max_request_size` (Number) The maximum request size in bytes.
diff --git a/docs/resources/service_integration_endpoint.md b/docs/resources/service_integration_endpoint.md
index 7462fc7ac..5ddcc985e 100644
--- a/docs/resources/service_integration_endpoint.md
+++ b/docs/resources/service_integration_endpoint.md
@@ -57,7 +57,7 @@ Optional:
 - `kafka_consumer_check_instances` (Number) Number of separate instances to fetch kafka consumer statistics with.
 - `kafka_consumer_stats_timeout` (Number) Number of seconds that datadog will wait to get consumer statistics from brokers.
 - `max_partition_contexts` (Number) Maximum number of partition contexts to send.
-- `site` (String) Datadog intake site. Defaults to datadoghq.com.
+- `site` (String) Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.
 
 
 ### Nested Schema for `datadog_user_config.datadog_tags`
@@ -137,17 +137,17 @@ Required:
 Required:
 
 - `bootstrap_servers` (String) Bootstrap servers.
-- `security_protocol` (String) Security protocol.
+- `security_protocol` (String) Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.
 
 Optional:
 
-- `sasl_mechanism` (String) SASL mechanism used for connections to the Kafka server.
+- `sasl_mechanism` (String) Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.
 - `sasl_plain_password` (String, Sensitive) Password for SASL PLAIN mechanism in the Kafka server.
 - `sasl_plain_username` (String) Username for SASL PLAIN mechanism in the Kafka server.
 - `ssl_ca_cert` (String) PEM-encoded CA certificate.
 - `ssl_client_cert` (String) PEM-encoded client certificate.
 - `ssl_client_key` (String) PEM-encoded client key.
-- `ssl_endpoint_identification_algorithm` (String) The endpoint identification algorithm to validate server hostname using server certificate.
+- `ssl_endpoint_identification_algorithm` (String) Enum: `https`, ``. The endpoint identification algorithm to validate server hostname using server certificate.
 
 
@@ -180,7 +180,7 @@ Optional:
 - `password` (String, Sensitive) Password.
 - `ssl_client_certificate` (String) Client certificate.
 - `ssl_client_key` (String) Client key.
-- `ssl_mode` (String) SSL Mode. The default value is `verify-full`.
+- `ssl_mode` (String) Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. The default value is `verify-full`.
 - `ssl_root_cert` (String) SSL Root Cert.
 
@@ -189,7 +189,7 @@ Optional:
 
 Required:
 
-- `authentication` (String) Authentication method.
+- `authentication` (String) Enum: `none`, `basic`. Authentication method.
 - `url` (String) Schema Registry URL.
 
 Optional:
@@ -221,7 +221,7 @@ Optional:
 
 Required:
 
-- `format` (String) Message format. The default value is `rfc5424`.
+- `format` (String) Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.
 - `port` (Number) Rsyslog server port. The default value is `514`.
 - `server` (String) Rsyslog server IP address or hostname.
 - `tls` (Boolean) Require TLS. The default value is `true`.
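The documentation changes above are generated from the Go schema files that follow. As a rough sketch of the pattern those files share (the field name and allowed values here are illustrative, not taken from this changeset), each enum-constrained option pairs an `Enum: ...` description prefix with a matching `validation.StringInSlice` validator from terraform-plugin-sdk v2:

```go
package service

import (
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/validation"
)

// exampleVersionField sketches the enum pattern used throughout this
// changeset: the Description lists the allowed values, and StringInSlice
// rejects anything else at plan time.
func exampleVersionField() *schema.Schema {
	return &schema.Schema{
		Description:  "Enum: `1`, `2`. Example major version.",
		Optional:     true,
		Type:         schema.TypeString,
		ValidateFunc: validation.StringInSlice([]string{"1", "2"}, false),
	}
}
```

The second argument to `validation.StringInSlice` is `ignoreCase`; passing `false`, as these files do, makes the match case-sensitive, so for example `RDB` would be rejected where `rdb` is expected.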
Also service can't be forked.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"off", "rdb"}, false), @@ -88,7 +88,7 @@ func dragonflyUserConfig() *schema.Schema { Type: schema.TypeString, }, "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", + Description: "Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"dump", "replication"}, false), diff --git a/internal/sdkprovider/userconfig/service/flink.go b/internal/sdkprovider/userconfig/service/flink.go index 53f63f1c5..4d2adf917 100644 --- a/internal/sdkprovider/userconfig/service/flink.go +++ b/internal/sdkprovider/userconfig/service/flink.go @@ -26,7 +26,7 @@ func flinkUserConfig() *schema.Schema { Type: schema.TypeList, }, "flink_version": { - Description: "Flink major version.", + Description: "Enum: `1.16`. Flink major version.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"1.16"}, false), diff --git a/internal/sdkprovider/userconfig/service/grafana.go b/internal/sdkprovider/userconfig/service/grafana.go index eb18c2a71..7aa8c0ec1 100644 --- a/internal/sdkprovider/userconfig/service/grafana.go +++ b/internal/sdkprovider/userconfig/service/grafana.go @@ -30,7 +30,7 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "alerting_error_or_timeout": { - Description: "Default error or timeout setting for new alerting rules.", + Description: "Enum: `alerting`, `keep_state`. Default error or timeout setting for new alerting rules.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"alerting", "keep_state"}, false), @@ -41,7 +41,7 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "alerting_nodata_or_nullvalues": { - Description: "Default value for 'no data or null values' for new alerting rules.", + Description: "Enum: `alerting`, `no_data`, `keep_state`, `ok`. Default value for 'no data or null values' for new alerting rules.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"alerting", "no_data", "keep_state", "ok"}, false), @@ -322,7 +322,7 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeList, }, "cookie_samesite": { - Description: "Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.", + Description: "Enum: `lax`, `strict`, `none`. Cookie SameSite attribute: 'strict' prevents sending cookie for cross-site requests, effectively disabling direct linking from other sites to Grafana. 'lax' is the default value.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"lax", "strict", "none"}, false), @@ -429,7 +429,7 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeString, }, "provider": { - Description: "Provider type.", + Description: "Enum: `s3`. Provider type.", Required: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"s3"}, false), @@ -588,7 +588,7 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "starttls_policy": { - Description: "Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. 
Default is OpportunisticStartTLS.", + Description: "Enum: `OpportunisticStartTLS`, `MandatoryStartTLS`, `NoStartTLS`. Either OpportunisticStartTLS, MandatoryStartTLS or NoStartTLS. Default is OpportunisticStartTLS.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"OpportunisticStartTLS", "MandatoryStartTLS", "NoStartTLS"}, false), @@ -619,7 +619,7 @@ func grafanaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "user_auto_assign_org_role": { - Description: "Set role for new signups. Defaults to Viewer.", + Description: "Enum: `Viewer`, `Admin`, `Editor`. Set role for new signups. Defaults to Viewer.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"Viewer", "Admin", "Editor"}, false), diff --git a/internal/sdkprovider/userconfig/service/kafka.go b/internal/sdkprovider/userconfig/service/kafka.go index 0c1513bf1..5c03f8e36 100644 --- a/internal/sdkprovider/userconfig/service/kafka.go +++ b/internal/sdkprovider/userconfig/service/kafka.go @@ -83,7 +83,7 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "compression_type": { - Description: "Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.", + Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `uncompressed`, `producer`. Specify the final compression type for a given topic. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'uncompressed' which is equivalent to no compression; and 'producer' which means retain the original compression codec set by the producer.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "uncompressed", "producer"}, false), @@ -134,7 +134,7 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "log_cleanup_policy": { - Description: "The default cleanup policy for segments beyond the retention window.", + Description: "Enum: `delete`, `compact`, `compact,delete`. The default cleanup policy for segments beyond the retention window.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"delete", "compact", "compact,delete"}, false), @@ -180,7 +180,7 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "log_message_timestamp_type": { - Description: "Define whether the timestamp in the message is message create time or log append time.", + Description: "Enum: `CreateTime`, `LogAppendTime`. Define whether the timestamp in the message is message create time or log append time.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"CreateTime", "LogAppendTime"}, false), @@ -342,13 +342,13 @@ func kafkaUserConfig() *schema.Schema { Description: "Kafka Connect configuration values", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "connector_client_config_override_policy": { - Description: "Defines what client configurations can be overridden by the connector. Default is None.", + Description: "Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. 
Default is None.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), }, "consumer_auto_offset_reset": { - Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", + Description: "Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"earliest", "latest"}, false), @@ -359,7 +359,7 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "consumer_isolation_level": { - Description: "Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Description: "Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), @@ -400,7 +400,7 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), @@ -454,13 +454,13 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeInt, }, "consumer_request_timeout_ms": { - Description: "The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.", + Description: "Enum: `1000`, `15000`, `30000`. The maximum total time to wait for messages for a request if the maximum number of messages has not yet been reached. The default value is `1000`.", Optional: true, Type: schema.TypeInt, ValidateFunc: validation.IntInSlice([]int{1000, 15000, 30000}), }, "name_strategy": { - Description: "Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.", + Description: "Enum: `topic_name`, `record_name`, `topic_record_name`. Name strategy to use when selecting subject for storing schemas. The default value is `topic_name`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"topic_name", "record_name", "topic_record_name"}, false), @@ -471,13 +471,13 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeBool, }, "producer_acks": { - Description: "The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.", + Description: "Enum: `all`, `-1`, `0`, `1`. 
The number of acknowledgments the producer requires the leader to have received before considering a request complete. If set to 'all' or '-1', the leader will wait for the full set of in-sync replicas to acknowledge the record. The default value is `1`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"all", "-1", "0", "1"}, false), }, "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), @@ -503,10 +503,10 @@ func kafkaUserConfig() *schema.Schema { Type: schema.TypeList, }, "kafka_version": { - Description: "Kafka major version.", + Description: "Enum: `3.1`, `3.2`, `3.3`, `3.4`, `3.5`, `3.6`, `3.7`. Kafka major version.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"3.2", "3.3", "3.1", "3.4", "3.5", "3.6", "3.7"}, false), + ValidateFunc: validation.StringInSlice([]string{"3.1", "3.2", "3.3", "3.4", "3.5", "3.6", "3.7"}, false), }, "private_access": { Description: "Allow access to selected service ports from private networks", diff --git a/internal/sdkprovider/userconfig/service/kafka_connect.go b/internal/sdkprovider/userconfig/service/kafka_connect.go index 8f3adee2f..1290b4217 100644 --- a/internal/sdkprovider/userconfig/service/kafka_connect.go +++ b/internal/sdkprovider/userconfig/service/kafka_connect.go @@ -68,13 +68,13 @@ func kafkaConnectUserConfig() *schema.Schema { Description: "Kafka Connect configuration values", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "connector_client_config_override_policy": { - Description: "Defines what client configurations can be overridden by the connector. Default is None.", + Description: "Enum: `None`, `All`. Defines what client configurations can be overridden by the connector. Default is None.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"None", "All"}, false), }, "consumer_auto_offset_reset": { - Description: "What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", + Description: "Enum: `earliest`, `latest`. What to do when there is no initial offset in Kafka or if the current offset does not exist any more on the server. Default is earliest.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"earliest", "latest"}, false), @@ -85,7 +85,7 @@ func kafkaConnectUserConfig() *schema.Schema { Type: schema.TypeInt, }, "consumer_isolation_level": { - Description: "Transaction read isolation level. read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", + Description: "Enum: `read_uncommitted`, `read_committed`. Transaction read isolation level. 
read_uncommitted is the default, but read_committed can be used if consume-exactly-once behavior is desired.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"read_uncommitted", "read_committed"}, false), @@ -126,7 +126,7 @@ func kafkaConnectUserConfig() *schema.Schema { Type: schema.TypeInt, }, "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), diff --git a/internal/sdkprovider/userconfig/service/m3aggregator.go b/internal/sdkprovider/userconfig/service/m3aggregator.go index 84dc767a5..3de31b5d0 100644 --- a/internal/sdkprovider/userconfig/service/m3aggregator.go +++ b/internal/sdkprovider/userconfig/service/m3aggregator.go @@ -59,13 +59,13 @@ func m3aggregatorUserConfig() *schema.Schema { Type: schema.TypeSet, }, "m3_version": { - Description: "M3 major version (deprecated, use m3aggregator_version).", + Description: "Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3aggregator_version).", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), }, "m3aggregator_version": { - Description: "M3 major version (the minimum compatible version).", + Description: "Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), diff --git a/internal/sdkprovider/userconfig/service/m3db.go b/internal/sdkprovider/userconfig/service/m3db.go index e63f31cc7..b926e20a2 100644 --- a/internal/sdkprovider/userconfig/service/m3db.go +++ b/internal/sdkprovider/userconfig/service/m3db.go @@ -131,7 +131,7 @@ func m3dbUserConfig() *schema.Schema { Type: schema.TypeList, }, "m3_version": { - Description: "M3 major version (deprecated, use m3db_version).", + Description: "Enum: `1.1`, `1.2`, `1.5`. M3 major version (deprecated, use m3db_version).", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), @@ -142,7 +142,7 @@ func m3dbUserConfig() *schema.Schema { Type: schema.TypeBool, }, "m3db_version": { - Description: "M3 major version (the minimum compatible version).", + Description: "Enum: `1.1`, `1.2`, `1.5`. M3 major version (the minimum compatible version).", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"1.1", "1.2", "1.5"}, false), @@ -212,7 +212,7 @@ func m3dbUserConfig() *schema.Schema { Type: schema.TypeString, }, "type": { - Description: "The type of aggregation (aggregated/unaggregated).", + Description: "Enum: `aggregated`, `unaggregated`. 
The type of aggregation (aggregated/unaggregated).", Required: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"aggregated", "unaggregated"}, false), @@ -258,7 +258,7 @@ func m3dbUserConfig() *schema.Schema { "aggregations": { Description: "List of aggregations to be applied.", Elem: &schema.Schema{ - Description: "Aggregation to be applied.", + Description: "Enum: `Count`, `Last`, `Max`, `Mean`, `Median`, `Min`, `P10`, `P20`, `P30`, `P40`, `P50`, `P60`, `P70`, `P80`, `P90`, `P95`, `P99`, `P999`, `P9999`, `Stdev`, `Sum`, `SumSq`. Aggregation to be applied.", Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"Count", "Last", "Max", "Mean", "Median", "Min", "P10", "P20", "P30", "P40", "P50", "P60", "P70", "P80", "P90", "P95", "P99", "P999", "P9999", "Stdev", "Sum", "SumSq"}, false), }, diff --git a/internal/sdkprovider/userconfig/service/mysql.go b/internal/sdkprovider/userconfig/service/mysql.go index bfabcf022..1598a33bd 100644 --- a/internal/sdkprovider/userconfig/service/mysql.go +++ b/internal/sdkprovider/userconfig/service/mysql.go @@ -110,7 +110,7 @@ func mysqlUserConfig() *schema.Schema { Type: schema.TypeString, }, "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", + Description: "Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"dump", "replication"}, false), @@ -230,7 +230,7 @@ func mysqlUserConfig() *schema.Schema { Type: schema.TypeInt, }, "internal_tmp_mem_storage_engine": { - Description: "The storage engine for in-memory internal temporary tables.", + Description: "Enum: `TempTable`, `MEMORY`. The storage engine for in-memory internal temporary tables.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"TempTable", "MEMORY"}, false), @@ -301,7 +301,7 @@ func mysqlUserConfig() *schema.Schema { Type: schema.TypeList, }, "mysql_version": { - Description: "MySQL major version.", + Description: "Enum: `8`. MySQL major version.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"8"}, false), diff --git a/internal/sdkprovider/userconfig/service/opensearch.go b/internal/sdkprovider/userconfig/service/opensearch.go index bb8529c66..e2745056a 100644 --- a/internal/sdkprovider/userconfig/service/opensearch.go +++ b/internal/sdkprovider/userconfig/service/opensearch.go @@ -48,7 +48,7 @@ func opensearchUserConfig() *schema.Schema { Type: schema.TypeString, }, "sorting_algorithm": { - Description: "Deletion sorting algorithm. The default value is `creation_date`.", + Description: "Enum: `alphabetical`, `creation_date`. Deletion sorting algorithm. The default value is `creation_date`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"alphabetical", "creation_date"}, false), @@ -222,7 +222,7 @@ func opensearchUserConfig() *schema.Schema { Type: schema.TypeInt, }, "authentication_backend": { - Description: "internal_authentication_backend_limiting.authentication_backend.", + Description: "Enum: `internal`. 
internal_authentication_backend_limiting.authentication_backend.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"internal"}, false), @@ -248,7 +248,7 @@ func opensearchUserConfig() *schema.Schema { Type: schema.TypeInt, }, "type": { - Description: "internal_authentication_backend_limiting.type.", + Description: "Enum: `username`. internal_authentication_backend_limiting.type.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"username"}, false), @@ -287,7 +287,7 @@ func opensearchUserConfig() *schema.Schema { Type: schema.TypeInt, }, "type": { - Description: "The type of rate limiting.", + Description: "Enum: `ip`. The type of rate limiting.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"ip"}, false), @@ -532,7 +532,7 @@ func opensearchUserConfig() *schema.Schema { Type: schema.TypeList, }, "opensearch_version": { - Description: "OpenSearch major version.", + Description: "Enum: `1`, `2`. OpenSearch major version.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"1", "2"}, false), diff --git a/internal/sdkprovider/userconfig/service/pg.go b/internal/sdkprovider/userconfig/service/pg.go index d5ffb52ec..10c375fb1 100644 --- a/internal/sdkprovider/userconfig/service/pg.go +++ b/internal/sdkprovider/userconfig/service/pg.go @@ -110,7 +110,7 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeString, }, "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", + Description: "Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"dump", "replication"}, false), @@ -215,7 +215,7 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "default_toast_compression": { - Description: "Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", + Description: "Enum: `lz4`, `pglz`. Specifies the default TOAST compression method for values of compressible columns (the default is lz4).", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"lz4", "pglz"}, false), @@ -236,13 +236,13 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "log_error_verbosity": { - Description: "Controls the amount of detail written in the server log for each message that is logged.", + Description: "Enum: `TERSE`, `DEFAULT`, `VERBOSE`. Controls the amount of detail written in the server log for each message that is logged.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"TERSE", "DEFAULT", "VERBOSE"}, false), }, "log_line_prefix": { - Description: "Choose from one of the available log-formats. These can support popular log analyzers like pgbadger, pganalyze etc.", + Description: "Enum: `'pid=%p,user=%u,db=%d,app=%a,client=%h '`, `'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '`, `'%m [%p] %q[user=%u,db=%d,app=%a] '`. Choose from one of the available log-formats. 
These can support popular log analyzers like pgbadger, pganalyze etc.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"'pid=%p,user=%u,db=%d,app=%a,client=%h '", "'%t [%p]: [%l-1] user=%u,db=%d,app=%a,client=%h '", "'%m [%p] %q[user=%u,db=%d,app=%a] '"}, false), @@ -348,7 +348,7 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "pg_stat_statements__dot__track": { - Description: "Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", + Description: "Enum: `all`, `top`, `none`. Controls which statements are counted. Specify top to track top-level statements (those issued directly by clients), all to also track nested statements (such as statements invoked within functions), or none to disable statement statistics collection. The default value is top.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"all", "top", "none"}, false), @@ -369,19 +369,19 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "track_commit_timestamp": { - Description: "Record commit time of transactions.", + Description: "Enum: `off`, `on`. Record commit time of transactions.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"off", "on"}, false), }, "track_functions": { - Description: "Enables tracking of function call counts and time used.", + Description: "Enum: `all`, `pl`, `none`. Enables tracking of function call counts and time used.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"all", "pl", "none"}, false), }, "track_io_timing": { - Description: "Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", + Description: "Enum: `off`, `on`. Enables timing of database I/O calls. This parameter is off by default, because it will repeatedly query the operating system for the current time, which may cause significant overhead on some platforms.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"off", "on"}, false), @@ -457,10 +457,10 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeBool, }, "pg_version": { - Description: "PostgreSQL major version.", + Description: "Enum: `10`, `11`, `12`, `13`, `14`, `15`, `16`. 
PostgreSQL major version.", Optional: true, Type: schema.TypeString, - ValidateFunc: validation.StringInSlice([]string{"11", "12", "13", "14", "15", "10", "16"}, false), + ValidateFunc: validation.StringInSlice([]string{"10", "11", "12", "13", "14", "15", "16"}, false), }, "pgaudit": { Description: "System-wide settings for the pgaudit extension", @@ -473,6 +473,7 @@ func pgUserConfig() *schema.Schema { "log": { Description: "Specifies which classes of statements will be logged by session audit logging.", Elem: &schema.Schema{ + Description: "Enum: `all`, `ddl`, `function`, `misc`, `misc_set`, `read`, `role`, `write`.", Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"all", "ddl", "function", "misc", "misc_set", "read", "role", "write"}, false), }, @@ -490,7 +491,7 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeBool, }, "log_level": { - Description: "Specifies the log level that will be used for log entries. The default value is `log`.", + Description: "Enum: `debug1`, `debug2`, `debug3`, `debug4`, `debug5`, `info`, `notice`, `warning`, `log`. Specifies the log level that will be used for log entries. The default value is `log`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"debug1", "debug2", "debug3", "debug4", "debug5", "info", "notice", "warning", "log"}, false), @@ -559,7 +560,7 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeInt, }, "autodb_pool_mode": { - Description: "PGBouncer pool mode. The default value is `transaction`.", + Description: "Enum: `session`, `transaction`, `statement`. PGBouncer pool mode. The default value is `transaction`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"session", "transaction", "statement"}, false), @@ -572,6 +573,7 @@ func pgUserConfig() *schema.Schema { "ignore_startup_parameters": { Description: "List of parameters to ignore when given in startup packet.", Elem: &schema.Schema{ + Description: "Enum: `extra_float_digits`, `search_path`.", Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"extra_float_digits", "search_path"}, false), }, @@ -718,7 +720,7 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeBool, }, "synchronous_replication": { - Description: "Synchronous replication type. Note that the service plan also needs to support synchronous replication.", + Description: "Enum: `quorum`, `off`. Synchronous replication type. Note that the service plan also needs to support synchronous replication.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"quorum", "off"}, false), @@ -735,7 +737,7 @@ func pgUserConfig() *schema.Schema { Type: schema.TypeList, }, "variant": { - Description: "Variant of the PostgreSQL service, may affect the features that are exposed by default.", + Description: "Enum: `aiven`, `timescale`. 
Variant of the PostgreSQL service, may affect the features that are exposed by default.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"aiven", "timescale"}, false), diff --git a/internal/sdkprovider/userconfig/service/redis.go b/internal/sdkprovider/userconfig/service/redis.go index 8c5f163c7..1a9267735 100644 --- a/internal/sdkprovider/userconfig/service/redis.go +++ b/internal/sdkprovider/userconfig/service/redis.go @@ -82,7 +82,7 @@ func redisUserConfig() *schema.Schema { Type: schema.TypeString, }, "method": { - Description: "The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", + Description: "Enum: `dump`, `replication`. The migration method to be used (currently supported only by Redis, Dragonfly, MySQL and PostgreSQL service types).", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"dump", "replication"}, false), @@ -179,7 +179,7 @@ func redisUserConfig() *schema.Schema { Type: schema.TypeString, }, "redis_acl_channels_default": { - Description: "Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.", + Description: "Enum: `allchannels`, `resetchannels`. Determines default pub/sub channels' ACL for new users if ACL is not supplied. When this option is not defined, all_channels is assumed to keep backward compatibility. This option doesn't affect Redis configuration acl-pubsub-default.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"allchannels", "resetchannels"}, false), @@ -200,7 +200,7 @@ func redisUserConfig() *schema.Schema { Type: schema.TypeInt, }, "redis_maxmemory_policy": { - Description: "Redis maxmemory-policy. The default value is `noeviction`.", + Description: "Enum: `noeviction`, `allkeys-lru`, `volatile-lru`, `allkeys-random`, `volatile-random`, `volatile-ttl`, `volatile-lfu`, `allkeys-lfu`. Redis maxmemory-policy. The default value is `noeviction`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"noeviction", "allkeys-lru", "volatile-lru", "allkeys-random", "volatile-random", "volatile-ttl", "volatile-lfu", "allkeys-lfu"}, false), @@ -216,7 +216,7 @@ func redisUserConfig() *schema.Schema { Type: schema.TypeInt, }, "redis_persistence": { - Description: "When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. Also service can't be forked.", + Description: "Enum: `off`, `rdb`. When persistence is 'rdb', Redis does RDB dumps each 10 minutes if any key is changed. Also RDB dumps are done according to backup schedule for backup purposes. When persistence is 'off', no RDB dumps and backups are done, so data can be lost at any moment if service is restarted for any reason, or if service is powered off. 
Also service can't be forked.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"off", "rdb"}, false), @@ -237,7 +237,7 @@ func redisUserConfig() *schema.Schema { Type: schema.TypeInt, }, "redis_version": { - Description: "Redis major version.", + Description: "Enum: `7.0`. Redis major version.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"7.0"}, false), diff --git a/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go b/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go index 051b9d8dc..04aea72ec 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go +++ b/internal/sdkprovider/userconfig/serviceintegration/clickhouse_kafka.go @@ -17,7 +17,7 @@ func clickhouseKafkaUserConfig() *schema.Schema { Description: "Tables to create", Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "auto_offset_reset": { - Description: "Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.", + Description: "Enum: `smallest`, `earliest`, `beginning`, `largest`, `latest`, `end`. Action to take when there is no initial offset in offset store or the desired offset is out of range. The default value is `earliest`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"smallest", "earliest", "beginning", "largest", "latest", "end"}, false), @@ -41,13 +41,13 @@ func clickhouseKafkaUserConfig() *schema.Schema { Type: schema.TypeList, }, "data_format": { - Description: "Message data format. The default value is `JSONEachRow`.", + Description: "Enum: `Avro`, `CSV`, `JSONAsString`, `JSONCompactEachRow`, `JSONCompactStringsEachRow`, `JSONEachRow`, `JSONStringsEachRow`, `MsgPack`, `TSKV`, `TSV`, `TabSeparated`, `RawBLOB`, `AvroConfluent`. Message data format. The default value is `JSONEachRow`.", Required: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"Avro", "CSV", "JSONAsString", "JSONCompactEachRow", "JSONCompactStringsEachRow", "JSONEachRow", "JSONStringsEachRow", "MsgPack", "TSKV", "TSV", "TabSeparated", "RawBLOB", "AvroConfluent"}, false), }, "date_time_input_format": { - Description: "Method to read DateTime from text input formats. The default value is `basic`.", + Description: "Enum: `basic`, `best_effort`, `best_effort_us`. Method to read DateTime from text input formats. The default value is `basic`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"basic", "best_effort", "best_effort_us"}, false), @@ -58,7 +58,7 @@ func clickhouseKafkaUserConfig() *schema.Schema { Type: schema.TypeString, }, "handle_error_mode": { - Description: "How to handle errors for Kafka engine. The default value is `default`.", + Description: "Enum: `default`, `stream`. How to handle errors for Kafka engine. 
The default value is `default`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"default", "stream"}, false), diff --git a/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go b/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go index 42e3231bc..d81f8d8a5 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/external_aws_cloudwatch_logs.go @@ -16,7 +16,7 @@ func externalAwsCloudwatchLogsUserConfig() *schema.Schema { Elem: &schema.Resource{Schema: map[string]*schema.Schema{"selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.", Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), }, diff --git a/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go b/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go index 7a95fecf0..61f0fa4c3 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/external_elasticsearch_logs.go @@ -16,7 +16,7 @@ func externalElasticsearchLogsUserConfig() *schema.Schema { Elem: &schema.Resource{Schema: map[string]*schema.Schema{"selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.", Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), }, diff --git a/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go b/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go index fefef4d16..5ff9a4d65 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/external_opensearch_logs.go @@ -16,7 +16,7 @@ func externalOpensearchLogsUserConfig() *schema.Schema { Elem: &schema.Resource{Schema: map[string]*schema.Schema{"selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. 
Log field name.", Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), }, diff --git a/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go b/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go index 88fe47547..21af2c204 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/kafka_logs.go @@ -22,7 +22,7 @@ func kafkaLogsUserConfig() *schema.Schema { "selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. Log field name.", Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), }, diff --git a/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go b/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go index 65084e21b..a827e8330 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go +++ b/internal/sdkprovider/userconfig/serviceintegration/kafka_mirrormaker.go @@ -38,7 +38,7 @@ func kafkaMirrormakerUserConfig() *schema.Schema { Type: schema.TypeInt, }, "producer_compression_type": { - Description: "Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", + Description: "Enum: `gzip`, `snappy`, `lz4`, `zstd`, `none`. Specify the default compression type for producers. This configuration accepts the standard compression codecs ('gzip', 'snappy', 'lz4', 'zstd'). It additionally accepts 'none' which is the default and equivalent to no compression.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"gzip", "snappy", "lz4", "zstd", "none"}, false), diff --git a/internal/sdkprovider/userconfig/serviceintegration/logs.go b/internal/sdkprovider/userconfig/serviceintegration/logs.go index 037f8487b..0acfc8c7f 100644 --- a/internal/sdkprovider/userconfig/serviceintegration/logs.go +++ b/internal/sdkprovider/userconfig/serviceintegration/logs.go @@ -27,7 +27,7 @@ func logsUserConfig() *schema.Schema { "selected_log_fields": { Description: "The list of logging fields that will be sent to the integration logging service. The MESSAGE and timestamp fields are always sent.", Elem: &schema.Schema{ - Description: "Log field name.", + Description: "Enum: `HOSTNAME`, `PRIORITY`, `REALTIME_TIMESTAMP`, `service_name`, `SYSTEMD_UNIT`. 
Log field name.", Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"HOSTNAME", "PRIORITY", "REALTIME_TIMESTAMP", "service_name", "SYSTEMD_UNIT"}, false), }, diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go index 149253491..a2ff2637d 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/datadog.go @@ -59,7 +59,7 @@ func datadogUserConfig() *schema.Schema { Type: schema.TypeInt, }, "site": { - Description: "Datadog intake site. Defaults to datadoghq.com.", + Description: "Enum: `datadoghq.com`, `datadoghq.eu`, `us3.datadoghq.com`, `us5.datadoghq.com`, `ddog-gov.com`, `ap1.datadoghq.com`. Datadog intake site. Defaults to datadoghq.com.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"datadoghq.com", "datadoghq.eu", "us3.datadoghq.com", "us5.datadoghq.com", "ddog-gov.com", "ap1.datadoghq.com"}, false), diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go index eccebdfe8..e4764f17d 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_kafka.go @@ -20,7 +20,7 @@ func externalKafkaUserConfig() *schema.Schema { Type: schema.TypeString, }, "sasl_mechanism": { - Description: "SASL mechanism used for connections to the Kafka server.", + Description: "Enum: `PLAIN`, `SCRAM-SHA-256`, `SCRAM-SHA-512`. SASL mechanism used for connections to the Kafka server.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"PLAIN", "SCRAM-SHA-256", "SCRAM-SHA-512"}, false), @@ -37,7 +37,7 @@ func externalKafkaUserConfig() *schema.Schema { Type: schema.TypeString, }, "security_protocol": { - Description: "Security protocol.", + Description: "Enum: `PLAINTEXT`, `SSL`, `SASL_PLAINTEXT`, `SASL_SSL`. Security protocol.", Required: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"PLAINTEXT", "SSL", "SASL_PLAINTEXT", "SASL_SSL"}, false), @@ -58,7 +58,7 @@ func externalKafkaUserConfig() *schema.Schema { Type: schema.TypeString, }, "ssl_endpoint_identification_algorithm": { - Description: "The endpoint identification algorithm to validate server hostname using server certificate.", + Description: "Enum: `https`, ``. The endpoint identification algorithm to validate server hostname using server certificate.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"https", ""}, false), diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go index 0b451790c..a43876913 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_postgresql.go @@ -46,7 +46,7 @@ func externalPostgresqlUserConfig() *schema.Schema { Type: schema.TypeString, }, "ssl_mode": { - Description: "SSL Mode. The default value is `verify-full`.", + Description: "Enum: `disable`, `allow`, `prefer`, `require`, `verify-ca`, `verify-full`. SSL Mode. 
The default value is `verify-full`.", Optional: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"disable", "allow", "prefer", "require", "verify-ca", "verify-full"}, false), diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go index 8d73cad7c..c2b90a599 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/external_schema_registry.go @@ -15,7 +15,7 @@ func externalSchemaRegistryUserConfig() *schema.Schema { DiffSuppressFunc: diff.SuppressUnchanged, Elem: &schema.Resource{Schema: map[string]*schema.Schema{ "authentication": { - Description: "Authentication method.", + Description: "Enum: `none`, `basic`. Authentication method.", Required: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"none", "basic"}, false), diff --git a/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go b/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go index ff694ec5b..be68b1a0d 100644 --- a/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go +++ b/internal/sdkprovider/userconfig/serviceintegrationendpoint/rsyslog.go @@ -25,7 +25,7 @@ func rsyslogUserConfig() *schema.Schema { Type: schema.TypeString, }, "format": { - Description: "Message format. The default value is `rfc5424`.", + Description: "Enum: `rfc5424`, `rfc3164`, `custom`. Message format. The default value is `rfc5424`.", Required: true, Type: schema.TypeString, ValidateFunc: validation.StringInSlice([]string{"rfc5424", "rfc3164", "custom"}, false), diff --git a/ucgenerator/main.go b/ucgenerator/main.go index 760ce7996..bf7c31cf2 100644 --- a/ucgenerator/main.go +++ b/ucgenerator/main.go @@ -318,6 +318,14 @@ func getSchemaValues(o *object) (jen.Dict, error) { func getDescription(o *object) string { desc := make([]string, 0) + if o.Enum != nil { + values := make([]string, 0) + for _, v := range o.Enum { + values = append(values, fmt.Sprintf("`%s`", v.Value)) + } + desc = append(desc, fmt.Sprintf("Enum: %s.", strings.Join(values, ", "))) + } + d := o.Description if len(d) < len(o.Title) { d = o.Title diff --git a/ucgenerator/models.go b/ucgenerator/models.go index 72ca57f79..21e9c33a8 100644 --- a/ucgenerator/models.go +++ b/ucgenerator/models.go @@ -5,6 +5,7 @@ import ( "fmt" "log" "regexp" + "sort" "strings" "github.com/stoewer/go-strcase" @@ -163,6 +164,13 @@ func (o *object) init(name string) { if o.Type == objectTypeString && o.Default != nil && o.Default.(string) == "" { o.Default = nil } + + // Sorts version enum values + if o.Enum != nil && strings.HasSuffix(name, "version") { + sort.Slice(o.Enum, func(i, j int) bool { + return o.Enum[i].Value < o.Enum[j].Value + }) + } } func (o *object) isNestedBlock() bool {
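The `getDescription` change in `ucgenerator/main.go` is what produces every `Enum: ...` prefix seen in the documentation and schema hunks above. Below is a minimal, self-contained sketch of that formatting step, assuming a simplified `enumValue` stand-in for the generator's real enum type (the actual type lives in `ucgenerator/models.go`):

```go
package main

import (
	"fmt"
	"strings"
)

// enumValue is a simplified stand-in for the generator's enum type
// (an assumption for this sketch, not the real definition).
type enumValue struct{ Value string }

// enumPrefix mirrors the new getDescription logic: wrap each allowed
// value in backticks and join them into a leading "Enum: ..." sentence.
func enumPrefix(enum []enumValue) string {
	values := make([]string, 0, len(enum))
	for _, v := range enum {
		values = append(values, fmt.Sprintf("`%s`", v.Value))
	}
	return fmt.Sprintf("Enum: %s.", strings.Join(values, ", "))
}

func main() {
	enum := []enumValue{{Value: "off"}, {Value: "rdb"}}
	fmt.Println(enumPrefix(enum) + " Persistence setting.")
	// Prints: Enum: `off`, `rdb`. Persistence setting.
}
```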
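The `ucgenerator/models.go` hunk sorts enum values for any field whose name ends in `version`, which explains validator reorderings such as `kafka_version` going from `3.2, 3.3, 3.1, ...` to `3.1, ..., 3.7`. The comparison is a plain lexicographic string sort, which happens to coincide with numeric order for every version set in this diff. A sketch of the behavior under the same simplified stand-in type:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

// enumValue is the same simplified stand-in used in the previous sketch.
type enumValue struct{ Value string }

func main() {
	// kafka_version values in their previous, unsorted order.
	enum := []enumValue{
		{Value: "3.2"}, {Value: "3.3"}, {Value: "3.1"},
		{Value: "3.4"}, {Value: "3.5"}, {Value: "3.6"}, {Value: "3.7"},
	}

	// Mirrors the new init() logic: lexicographic comparison of the raw strings.
	sort.Slice(enum, func(i, j int) bool {
		return enum[i].Value < enum[j].Value
	})

	out := make([]string, 0, len(enum))
	for _, v := range enum {
		out = append(out, v.Value)
	}
	fmt.Println(strings.Join(out, ", "))
	// Prints: 3.1, 3.2, 3.3, 3.4, 3.5, 3.6, 3.7
}
```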