diff --git a/Makefile b/Makefile index 48c58563f..ae9021e0e 100644 --- a/Makefile +++ b/Makefile @@ -27,7 +27,7 @@ DOCS_OUT = $(shell echo $${DOCS_OUT:-$(my_d)/builds/docs/yaml}) UNAME_S := $(shell uname -s) UNAME_M := $(shell uname -m) -GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*") +GOFILES_NOVENDOR_NOMOCK = $(shell find . -type f -name '*.go' -not -path "./vendor/*" -not -path "*_mock.go") GOOS = linux ifeq ($(UNAME_S),Darwin) @@ -113,7 +113,7 @@ shellcheck: gofmt_check: @echo "==> ensure code adheres to gofmt (with vendor directory excluded)" @echo "" - @GOFMT=$$(gofmt -l ${GOFILES_NOVENDOR}); \ + @GOFMT=$$(gofmt -w -r 'interface{} -> any' -l ${GOFILES_NOVENDOR_NOMOCK}); \ if [ -n "$${GOFMT}" ]; then \ echo "gofmt checking failed:\n"; echo "$${GOFMT} \n"; exit 1; \ fi diff --git a/args.go b/args.go index d2ee82424..5f351f217 100644 --- a/args.go +++ b/args.go @@ -366,6 +366,8 @@ const ( ArgDatabaseRestoreFromTimestamp = "restore-from-timestamp" // ArgDatabaseEngine is a flag for specifying which database engine to use ArgDatabaseEngine = "engine" + // ArgDatabaseConfigJson is a flag for specifying the database configuration in JSON format for an update + ArgDatabaseConfigJson = "config-json" // ArgDatabaseNumNodes is the number of nodes in the database cluster ArgDatabaseNumNodes = "num-nodes" // ArgDatabaseStorageSizeMib is the amount of disk space, in MiB, that should be allocated to the database cluster @@ -387,6 +389,53 @@ const ( // ArgDatabasePrivateConnectionBool determine if the private connection details should be shown ArgDatabasePrivateConnectionBool = "private" + // ArgDatabaseTopicReplicationFactor is the replication factor of a kafka topic + ArgDatabaseTopicReplicationFactor = "replication-factor" + // ArgDatabaseTopicPartitionCount is the number of partitions that are associated with a kafka topic + ArgDatabaseTopicPartitionCount = "partition-count" + // ArgDatabaseTopicCleanupPolicy is the cleanup policy 
associated with a kafka topic + ArgDatabaseTopicCleanupPolicy = "cleanup-policy" + // ArgDatabaseTopicCompressionType is the compression algorithm used for a kafka topic + ArgDatabaseTopicCompressionType = "compression-type" + // ArgDatabaseTopicDeleteRetentionMS is the amount of time, in ms, to retain delete tombstone markers for a kafka topic + ArgDatabaseTopicDeleteRetentionMS = "delete-retention-ms" + // ArgDatabaseTopicFileDeleteDelayMS is the amount of time, in ms, to wait before deleting a file from the filesystem + ArgDatabaseTopicFileDeleteDelayMS = "file-delete-delay-ms" + // ArgDatabaseTopicFlushMessages is the size, in bytes, of all messages to accumulate on a partition before flushing them to disk + ArgDatabaseTopicFlushMessages = "flush-messages" + // ArgDatabaseTopicFlushMS is the amount of time, in ms, a message is kept in memory before it is flushed to disk + ArgDatabaseTopicFlushMS = "flush-ms" + // ArgDatabaseTopicIntervalIndexBytes is the number of bytes between entries being added into the offset index + ArgDatabaseTopicIntervalIndexBytes = "interval-index-bytes" + // ArgDatabaseTopicMaxCompactionLagMS is the maximum amount of time, in ms, that a message will remain uncompacted (if compaction is enabled) + ArgDatabaseTopicMaxCompactionLagMS = "max-compaction-lag-ms" + // ArgDatabaseTopicMaxMessageBytes is the maximum size, in bytes, of the largest record batch that can be sent to the server + ArgDatabaseTopicMaxMessageBytes = "max-message-bytes" + // ArgDatabaseTopicMesssageDownConversionEnable determines whether brokers should convert messages for consumers expecting older message formats + ArgDatabaseTopicMesssageDownConversionEnable = "message-down-conversion-enable" + // ArgDatabaseTopicMessageFormatVersion is the version used by the broker to append messages to the kafka topic logs + ArgDatabaseTopicMessageFormatVersion = "message-format-version" + // ArgDatabaseTopicMessageTimestampType is the timestamp used for messages + 
ArgDatabaseTopicMessageTimestampType = "message-timestamp-type" + // ArgDatabaseTopicMinCleanableDirtyRatio is the ratio, between 0 and 1, specifying the frequency of log compaction + ArgDatabaseTopicMinCleanableDirtyRatio = "min-cleanable-dirty-ratio" + // ArgDatabaseTopicMinCompactionLagMS is the minimum time, in ms, that a message will remain uncompacted + ArgDatabaseTopicMinCompactionLagMS = "min-compaction-lag-ms" + // ArgDatabaseTopicMinInsyncReplicas is the minimum number of replicas that must ACK a write for the write to be considered successful + ArgDatabaseTopicMinInsyncReplicas = "min-insync-replicas" + // ArgDatabaseTopicPreallocate determines whether a file should be preallocated on disk when creating a new log segment + ArgDatabaseTopicPreallocate = "preallocate" + // ArgDatabaseTopicRetentionBytes is the maximum size, in bytes, of a topic log before messages are deleted + ArgDatabaseTopicRetentionBytes = "retention-bytes" + // ArgDatabaseTopicRetentionMS is the maximum time, in ms, that a message is retained before deleting it + ArgDatabaseTopicRetentionMS = "retention-ms" + // ArgDatabaseTopicSegmentBytes is the maximum size, in bytes, of a single log file + ArgDatabaseTopicSegmentBytes = "segment-bytes" + // ArgDatabaseTopicSegmentJitterMS is the maximum random jitter, in ms, subtracted from the scheduled segment roll time to avoid thundering herds of segment rolling + ArgDatabaseTopicSegmentJitterMS = "segment-jitter-ms" + // ArgDatabaseTopicSegmentMS is the period of time, in ms, after which the log will be forced to roll if the segment file isn't full + ArgDatabaseTopicSegmentMS = "segment-ms" + // ArgPrivateNetworkUUID is the flag for VPC UUID ArgPrivateNetworkUUID = "private-network-uuid" diff --git a/commands/activations_test.go b/commands/activations_test.go index f2c6c79f1..94f079a35 100644 --- a/commands/activations_test.go +++ b/commands/activations_test.go @@ -39,15 +39,15 @@ func TestActivationsCommand(t *testing.T) { 
assert.ElementsMatch(t, expected, names) } -var hello1Result = whisk.Result(map[string]interface{}{ +var hello1Result = whisk.Result(map[string]any{ "body": "Hello stranger!", }) -var hello2Result = whisk.Result(map[string]interface{}{ +var hello2Result = whisk.Result(map[string]any{ "body": "Hello Archie!", }) -var hello3Result = whisk.Result(map[string]interface{}{ +var hello3Result = whisk.Result(map[string]any{ "error": "Missing main/no code to execute.", }) @@ -354,10 +354,10 @@ func TestActivationsList(t *testing.T) { } count := false - var limit interface{} - var since interface{} - var upto interface{} - var skip interface{} + var limit any + var since any + var upto any + var skip any if tt.doctlFlags != nil { for k, v := range tt.doctlFlags { @@ -466,8 +466,8 @@ func TestActivationsLogs(t *testing.T) { activationId = config.Args[0] } - var limit interface{} - var funcName interface{} + var limit any + var funcName any if tt.doctlFlags != nil { for k, v := range tt.doctlFlags { diff --git a/commands/apps_dev.go b/commands/apps_dev.go index c2a4ff078..1cd2be567 100644 --- a/commands/apps_dev.go +++ b/commands/apps_dev.go @@ -516,8 +516,8 @@ func appDevPrepareEnvironment(ctx context.Context, ws *workspace.AppDev, cli bui } // TODO: get stack run image from builder image md after we pull it, see below - images = append(images, "digitaloceanapps/apps-run:heroku-18_1b6264d") - images = append(images, "digitaloceanapps/apps-run:heroku-22_1b6264d") + images = append(images, "digitaloceanapps/apps-run:heroku-18_db5978a") + images = append(images, "digitaloceanapps/apps-run:heroku-22_db5978a") } if componentSpec.GetType() == godo.AppComponentTypeStaticSite { diff --git a/commands/auth.go b/commands/auth.go index 6c6af7982..2c9ef2789 100644 --- a/commands/auth.go +++ b/commands/auth.go @@ -222,7 +222,7 @@ func RunAuthList(c *CmdConfig) error { return nil } -func displayAuthContexts(out io.Writer, currentContext string, contexts map[string]interface{}) { +func 
displayAuthContexts(out io.Writer, currentContext string, contexts map[string]any) { // Because the default context isn't present on the auth-contexts field, // we add it manually so that it's always included in the output, and so // we can check if it's the current context. diff --git a/commands/auth_test.go b/commands/auth_test.go index f3f6cb7c1..9d83cd39a 100644 --- a/commands/auth_test.go +++ b/commands/auth_test.go @@ -90,11 +90,11 @@ func TestAuthInitConfig(t *testing.T) { // Ensure that the dev.config.set.dev-config setting is correct to prevent // a conflict with the base config setting. devConfig := configFile["dev"] - devConfigSetting := devConfig.(map[interface{}]interface{})["config"] - expectedConfigSetting := map[interface{}]interface{}( - map[interface{}]interface{}{ - "set": map[interface{}]interface{}{"dev-config": ""}, - "unset": map[interface{}]interface{}{"dev-config": ""}, + devConfigSetting := devConfig.(map[any]any)["config"] + expectedConfigSetting := map[any]any( + map[any]any{ + "set": map[any]any{"dev-config": ""}, + "unset": map[any]any{"dev-config": ""}, }, ) assert.Equal(t, expectedConfigSetting, devConfigSetting, "unexpected setting for 'dev.config'") @@ -140,7 +140,7 @@ func TestAuthForcesLowercase(t *testing.T) { withTestClient(t, func(config *CmdConfig, tm *tcMocks) { tm.oauth.EXPECT().TokenInfo(gomock.Any()).Return(&do.OAuthTokenInfo{}, nil) - contexts := map[string]interface{}{doctl.ArgDefaultContext: true, "TestCapitalCase": true} + contexts := map[string]any{doctl.ArgDefaultContext: true, "TestCapitalCase": true} context := "TestCapitalCase" viper.Set("auth-contexts", contexts) viper.Set("context", context) @@ -148,7 +148,7 @@ func TestAuthForcesLowercase(t *testing.T) { err := RunAuthInit(retrieveUserTokenFunc)(config) assert.NoError(t, err) - contexts = map[string]interface{}{doctl.ArgDefaultContext: true, "TestCapitalCase": true} + contexts = map[string]any{doctl.ArgDefaultContext: true, "TestCapitalCase": true} 
viper.Set("auth-contexts", contexts) viper.Set("context", "contextDoesntExist") err = RunAuthSwitch(config) @@ -175,14 +175,14 @@ func Test_displayAuthContexts(t *testing.T) { Name string Out *bytes.Buffer Context string - Contexts map[string]interface{} + Contexts map[string]any Expected string }{ { Name: "default context only", Out: &bytes.Buffer{}, Context: doctl.ArgDefaultContext, - Contexts: map[string]interface{}{ + Contexts: map[string]any{ doctl.ArgDefaultContext: true, }, Expected: "default (current)\n", @@ -191,7 +191,7 @@ func Test_displayAuthContexts(t *testing.T) { Name: "default context and additional context", Out: &bytes.Buffer{}, Context: doctl.ArgDefaultContext, - Contexts: map[string]interface{}{ + Contexts: map[string]any{ doctl.ArgDefaultContext: true, "test": true, }, @@ -201,7 +201,7 @@ func Test_displayAuthContexts(t *testing.T) { Name: "default context and additional context set to additional context", Out: &bytes.Buffer{}, Context: "test", - Contexts: map[string]interface{}{ + Contexts: map[string]any{ doctl.ArgDefaultContext: true, "test": true, }, @@ -211,7 +211,7 @@ func Test_displayAuthContexts(t *testing.T) { Name: "unset context", Out: &bytes.Buffer{}, Context: "missing", - Contexts: map[string]interface{}{ + Contexts: map[string]any{ doctl.ArgDefaultContext: true, "test": true, }, @@ -284,7 +284,7 @@ func TestTokenInputValidator(t *testing.T) { } } -type testConfig map[string]interface{} +type testConfig map[string]any type nopWriteCloser struct { io.Writer diff --git a/commands/certificates.go b/commands/certificates.go index dca418088..154b5ac26 100644 --- a/commands/certificates.go +++ b/commands/certificates.go @@ -41,48 +41,51 @@ Once a certificate has been stored, it is assigned a unique certificate ID that - The name you gave the certificate - A comma-separated list of domain names associated with the certificate - The SHA-1 fingerprint of the certificate -- The certificate's expiration date given in ISO8601 date/time format 
-- The certificate's creation date given in ISO8601 date/time format +- The certificate's expiration date, in ISO8601 date/time format +- The certificate's creation date, in ISO8601 date/time format - The certificate type (` + "`" + `custom` + "`" + ` or ` + "`" + `lets_encrypt` + "`" + `) - The certificate state (` + "`" + `pending` + "`" + `, ` + "`" + `verified` + "`" + `, or ` + "`" + `error` + "`" + `)` - CmdBuilder(cmd, RunCertificateGet, "get ", "Retrieve details about a certificate", `This command retrieves the following details about a certificate:`+certDetails, Writer, + cmdCertificateGet := CmdBuilder(cmd, RunCertificateGet, "get ", "Retrieve details about a certificate", `This command retrieves the following details about a certificate:`+certDetails, Writer, aliasOpt("g"), displayerType(&displayers.Certificate{})) + cmdCertificateGet.Example = "The following example retrieves the ID, name, and domains associated with a certificate: doctl compute certificate get f81d4fae-7dec-11d0-a765-00a0c91e6bf6 --format ID,Name,DNSNames" + cmdCertificateCreate := CmdBuilder(cmd, RunCertificateCreate, "create", - "Create a new certificate", `This command allows you to create a certificate. There are two supported certificate types: Let's Encrypt certificates, and custom certificates. + "Create a new certificate", `Creates a new Let's Encrypt certificate or adds an existing custom certificate to your team. There are two supported certificate types: Let's Encrypt certificates, and custom certificates. -Let's Encrypt certificates are free and will be auto-renewed and managed for you by DigitalOcean. +Let's Encrypt certificates are free, auto-renewed and managed for you by DigitalOcean. 
-To create a Let's Encrypt certificate, you'll need to add the domain(s) to your account at cloud.digitalocean.com, or via `+"`"+`doctl compute domain create`+"`"+`, then provide a certificate name and a comma-separated list of the domain names you'd like to associate with the certificate: +To create a Let's Encrypt certificate, you need to add the domain(s) to your account using the DigitalOcean control panel, or via `+"`"+`doctl compute domain create`+"`"+`, then provide a certificate name and a comma-separated list of the domain names you'd like to associate with the certificate: doctl compute certificate create --type lets_encrypt --name mycert --dns-names example.org -To upload a custom certificate, you'll need to provide a certificate name, the path to the certificate, the path to the private key for the certificate, and the path to the certificate chain, all in PEM format: +To upload a custom certificate, you need to provide a certificate name, the path to the certificate, the path to the certificate's private key, and the path to the certificate chain, all in PEM format: doctl compute certificate create --type custom --name mycert --leaf-certificate-path cert.pem --certificate-chain-path fullchain.pem --private-key-path privkey.pem`, Writer, aliasOpt("c")) AddStringFlag(cmdCertificateCreate, doctl.ArgCertificateName, "", "", - "Certificate name", requiredOpt()) + "A user-specified name for the certificate.", requiredOpt()) AddStringSliceFlag(cmdCertificateCreate, doctl.ArgCertificateDNSNames, "", []string{}, "Comma-separated list of domains for which the certificate will be issued. 
The domains must be managed using DigitalOcean's DNS.") AddStringFlag(cmdCertificateCreate, doctl.ArgPrivateKeyPath, "", "", - "The path to a PEM-formatted private-key corresponding to the SSL certificate.") + "The path on your local machine to a PEM-formatted private-key corresponding to the SSL certificate.") AddStringFlag(cmdCertificateCreate, doctl.ArgLeafCertificatePath, "", "", - "The path to a PEM-formatted public SSL certificate.") + "The path on your local machine to a PEM-formatted public SSL certificate.") AddStringFlag(cmdCertificateCreate, doctl.ArgCertificateChainPath, "", "", - "The path to a full PEM-formatted trust chain between the certificate authority's certificate and your domain's SSL certificate.") + "The path on your local machine to a full PEM-formatted trust chain between the certificate authority's certificate and your domain's SSL certificate.") AddStringFlag(cmdCertificateCreate, doctl.ArgCertificateType, "", "", - "Certificate type [custom|lets_encrypt]") + "The type of certificate, `custom` or `lets_encrypt`.") - CmdBuilder(cmd, RunCertificateList, "list", "Retrieve list of the account's stored certificates", `This command retrieves a list of all certificates associated with the account. The following details are shown for each certificate:`+certDetails, Writer, + cmdCertificateList := CmdBuilder(cmd, RunCertificateList, "list", "Retrieve list of the account's stored certificates", `This command retrieves a list of all certificates associated with the account. 
The following details are shown for each certificate:`+certDetails, Writer, aliasOpt("ls"), displayerType(&displayers.Certificate{})) + cmdCertificateList.Example = `The following example retrieves a list of all certificates associated with your account and uses the ` + "`" + `--format` + "`" + ` flag to return only the IDs, names, and the domains associated with each certificate: doctl compute certificate list --format ID,Name,DNSNames` cmdCertificateDelete := CmdBuilder(cmd, RunCertificateDelete, "delete ", - "Delete the specified certificate", `This command deletes the specified certificate. + "Delete the specified certificate", `Deletes the specified certificate. Use `+"`"+`doctl compute certificate list`+"`"+` to see all available certificates associated with your account.`, Writer, aliasOpt("d", "rm")) AddBoolFlag(cmdCertificateDelete, doctl.ArgForce, doctl.ArgShortForce, false, "Delete the certificate without a confirmation prompt") - + cmdCertificateDelete.Example = `The following example deletes the certificate with the ID ` + "`" + `f81d4fae-7dec-11d0-a765-00a0c91e6bf6` + "`" + `: doctl compute certificate delete f81d4fae-7dec-11d0-a765-00a0c91e6bf6` return cmd } diff --git a/commands/databases.go b/commands/databases.go index 8f0a76566..fc97cad21 100644 --- a/commands/databases.go +++ b/commands/databases.go @@ -17,6 +17,7 @@ import ( "errors" "fmt" "os" + "strconv" "strings" "time" @@ -148,6 +149,7 @@ In addition, for PostgreSQL and MySQL clusters, you can provide a disk size in M cmd.AddCommand(databaseFirewalls()) cmd.AddCommand(databaseOptions()) cmd.AddCommand(databaseConfiguration()) + cmd.AddCommand(databaseTopic()) return cmd } @@ -1554,6 +1556,355 @@ func RunDatabaseSetSQLModes(c *CmdConfig) error { return c.Databases().SetSQLMode(databaseID, sqlModes...) 
} +func RunDatabaseTopicList(c *CmdConfig) error { + if len(c.Args) == 0 { + return doctl.NewMissingArgsErr(c.NS) + } + + databaseID := c.Args[0] + topics, err := c.Databases().ListTopics(databaseID) + if err != nil { + return err + } + item := &displayers.DatabaseKafkaTopics{DatabaseTopics: topics} + return c.Display(item) +} + +func RunDatabaseTopicGet(c *CmdConfig) error { + if len(c.Args) < 2 { + return doctl.NewMissingArgsErr(c.NS) + } + + databaseID := c.Args[0] + topicName := c.Args[1] + topic, err := c.Databases().GetTopic(databaseID, topicName) + if err != nil { + return err + } + + item := &displayers.DatabaseKafkaTopic{DatabaseTopic: *topic} + return c.Display(item) +} + +func RunDatabaseTopicListPartition(c *CmdConfig) error { + if len(c.Args) < 2 { + return doctl.NewMissingArgsErr(c.NS) + } + + databaseID := c.Args[0] + topicName := c.Args[1] + topic, err := c.Databases().GetTopic(databaseID, topicName) + if err != nil { + return err + } + + item := &displayers.DatabaseKafkaTopicPartitions{DatabaseTopicPartitions: topic.Partitions} + return c.Display(item) +} + +func RunDatabaseTopicDelete(c *CmdConfig) error { + if len(c.Args) < 2 { + return doctl.NewMissingArgsErr(c.NS) + } + + force, err := c.Doit.GetBool(c.NS, doctl.ArgForce) + if err != nil { + return err + } + + if force || AskForConfirmDelete("kafka topic", 1) == nil { + databaseID := c.Args[0] + topicName := c.Args[1] + return c.Databases().DeleteTopic(databaseID, topicName) + } + + return errOperationAborted +} + +func RunDatabaseTopicCreate(c *CmdConfig) error { + if len(c.Args) < 2 { + return doctl.NewMissingArgsErr(c.NS) + } + + databaseID := c.Args[0] + topicName := c.Args[1] + + createReq := &godo.DatabaseCreateTopicRequest{Name: topicName} + + pc, err := c.Doit.GetInt(c.NS, doctl.ArgDatabaseTopicPartitionCount) + if err == nil && pc != 0 { + pcUInt32 := uint32(pc) + createReq.PartitionCount = &pcUInt32 + } + rf, err := c.Doit.GetInt(c.NS, doctl.ArgDatabaseTopicReplicationFactor) + if err 
== nil && rf != 0 { + rfUInt32 := uint32(rf) + createReq.ReplicationFactor = &rfUInt32 + } + createReq.Config = getDatabaseTopicConfigArgs(c) + + _, err = c.Databases().CreateTopic(databaseID, createReq) + return err +} + +func RunDatabaseTopicUpdate(c *CmdConfig) error { + if len(c.Args) < 2 { + return doctl.NewMissingArgsErr(c.NS) + } + + databaseID := c.Args[0] + topicName := c.Args[1] + + updateReq := &godo.DatabaseUpdateTopicRequest{} + + pc, err := c.Doit.GetInt(c.NS, doctl.ArgDatabaseTopicPartitionCount) + if err == nil && pc != 0 { + pcUInt32 := uint32(pc) + updateReq.PartitionCount = &pcUInt32 + } + rf, err := c.Doit.GetInt(c.NS, doctl.ArgDatabaseTopicReplicationFactor) + if err == nil && rf != 0 { + rfUInt32 := uint32(rf) + updateReq.ReplicationFactor = &rfUInt32 + } + updateReq.Config = getDatabaseTopicConfigArgs(c) + + err = c.Databases().UpdateTopic(databaseID, topicName, updateReq) + return err +} + +func getDatabaseTopicConfigArgs(c *CmdConfig) *godo.TopicConfig { + res := &godo.TopicConfig{} + val, err := c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicCleanupPolicy) + if err == nil { + res.CleanupPolicy = val + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicCompressionType) + if err == nil && val != "" { + res.CompressionType = val + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicDeleteRetentionMS) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.DeleteRetentionMS = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicFileDeleteDelayMS) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.FileDeleteDelayMS = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicFlushMessages) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.FlushMessages = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicFlushMS) + if err == nil && val != "" { + i, err 
:= strconv.ParseUint(val, 10, 64) + if err == nil { + res.FlushMS = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicIntervalIndexBytes) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.IndexIntervalBytes = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicMaxCompactionLagMS) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.MaxCompactionLagMS = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicMaxMessageBytes) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.MaxMessageBytes = &i + } + } + bVal, err := c.Doit.GetBoolPtr(c.NS, doctl.ArgDatabaseTopicMesssageDownConversionEnable) + if err == nil && bVal != nil { + res.MessageDownConversionEnable = bVal + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicMessageFormatVersion) + if err == nil && val != "" { + res.MessageFormatVersion = val + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicMessageTimestampType) + if err == nil && val != "" { + res.MessageTimestampType = val + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicMinCleanableDirtyRatio) + if err == nil && val != "" { + i, err := strconv.ParseFloat(val, 32) + if err == nil { + iFloat32 := float32(i) + res.MinCleanableDirtyRatio = &iFloat32 + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicMinCompactionLagMS) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.MinCompactionLagMS = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicMinInsyncReplicas) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 32) + if err == nil { + iUint32 := uint32(i) + res.MinInsyncReplicas = &iUint32 + } + } + bVal, err = c.Doit.GetBoolPtr(c.NS, doctl.ArgDatabaseTopicPreallocate) + if err == nil && bVal != nil { + res.Preallocate = bVal + } + val, err = 
c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicRetentionBytes) + if err == nil && val != "" { + i, err := strconv.ParseInt(val, 10, 64) + if err == nil { + res.RetentionBytes = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicRetentionMS) + if err == nil && val != "" { + i, err := strconv.ParseInt(val, 10, 64) + if err == nil { + res.RetentionMS = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicSegmentBytes) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.SegmentBytes = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicSegmentJitterMS) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.SegmentJitterMS = &i + } + } + val, err = c.Doit.GetString(c.NS, doctl.ArgDatabaseTopicSegmentMS) + if err == nil && val != "" { + i, err := strconv.ParseUint(val, 10, 64) + if err == nil { + res.SegmentMS = &i + } + } + + return res +} + +func databaseTopic() *Command { + cmd := &Command{ + Command: &cobra.Command{ + Use: "topics", + Short: `Display commands to manage topics for kafka database clusters`, + Long: `The subcommands under ` + "`" + `doctl databases topics` + "`" + ` enable the management of topics for kafka database clusters`, + }, + } + + topicListDetails := ` +This command lists the following details for each topic in a kafka database cluster: + + - The Name of the topic. + - The State of the topic. + - The Replication Factor of the topic - number of brokers the topic's partitions are replicated across. + ` + + topicGetDetails := ` +This command lists the following details for a given topic in a kafka database cluster: + + - The Name of the topic. + - The Partitions of the topic - the number of partitions in the topics + - The Replication Factor of the topic - number of brokers the topic's partitions are replicated across. + - Additional advanced configuration for the topic. 
+ +The details of the topic are listed in key/value pairs + ` + topicGetPartitionDetails := ` +This command lists the following details for each partition of a given topic in a kafka database cluster: + + - The Id - identifier of the topic partition. + - The Size - size of the topic partition, in bytes. + - The InSyncReplicas - number of brokers that are in sync with the partition leader. + - The EarliestOffset - earliest offset read amongst all consumers of the partition. + ` + + CmdBuilder(cmd, RunDatabaseTopicList, "list ", "Retrieve a list of topics for a given kafka database", topicListDetails, Writer, displayerType(&displayers.DatabaseKafkaTopics{}), aliasOpt("ls")) + CmdBuilder(cmd, RunDatabaseTopicGet, "get ", "Retrieve the configuration for a given kafka topic", topicGetDetails, Writer, displayerType(&displayers.DatabaseKafkaTopic{}), aliasOpt("g")) + CmdBuilder(cmd, RunDatabaseTopicListPartition, "partitions ", "Retrieve the partitions for a given kafka topic", topicGetPartitionDetails, Writer, aliasOpt("p")) + cmdDatabaseTopicDelete := CmdBuilder(cmd, RunDatabaseTopicDelete, "delete ", "Deletes a kafka topic by topic name", "", Writer, aliasOpt("rm")) + AddBoolFlag(cmdDatabaseTopicDelete, doctl.ArgForce, doctl.ArgShortForce, false, "Deletes the kafka topic without a confirmation prompt") + cmdDatabaseTopicCreate := CmdBuilder(cmd, RunDatabaseTopicCreate, "create ", "Creates a topic for a given kafka database", + "This command creates a kafka topic for the specified kafka database cluster, giving it the specified name. Example: doctl databases topics create --replication-factor 2 --partition-count 4", Writer, aliasOpt("c")) + cmdDatabaseTopicUpdate := CmdBuilder(cmd, RunDatabaseTopicUpdate, "update ", "Updates a topic for a given kafka database", + "This command updates a kafka topic for the specified kafka database cluster. 
Example: doctl databases topics update ", Writer, aliasOpt("u")) + cmdsWithConfig := []*Command{cmdDatabaseTopicCreate, cmdDatabaseTopicUpdate} + for _, c := range cmdsWithConfig { + AddIntFlag(c, doctl.ArgDatabaseTopicReplicationFactor, "", 2, "Specifies the number of nodes to replicate data across the kafka cluster") + AddIntFlag(c, doctl.ArgDatabaseTopicPartitionCount, "", 1, "Specifies the number of partitions available for the topic") + AddStringFlag(c, doctl.ArgDatabaseTopicCleanupPolicy, "", "delete", + "Specifies the retention policy to use on log segments: Possible values are 'delete', 'compact_delete', 'compact'") + AddStringFlag(c, doctl.ArgDatabaseTopicCompressionType, "", "producer", + "Specifies the compression type for a kafka topic: Possible values are 'producer', 'gzip', 'snappy', 'lz4', 'zstd', 'uncompressed'") + AddStringFlag(c, doctl.ArgDatabaseTopicDeleteRetentionMS, "", "", + "Specifies how long (in ms) to retain delete tombstone markers for topics") + AddStringFlag(c, doctl.ArgDatabaseTopicFileDeleteDelayMS, "", "", + "Specifies the minimum time (in ms) to wait before deleting a file from the filesystem") + AddStringFlag(c, doctl.ArgDatabaseTopicFlushMessages, "", "", + "Specifies the maximum number of messages to accumulate on a log partition before messages are flushed to disk") + AddStringFlag(c, doctl.ArgDatabaseTopicFlushMS, "", "", + "Specifies the maximum time (in ms) that a message is kept in memory before being flushed to disk") + AddStringFlag(c, doctl.ArgDatabaseTopicIntervalIndexBytes, "", "", + "Specifies the number of bytes between entries being added into the offset index") + AddStringFlag(c, doctl.ArgDatabaseTopicMaxCompactionLagMS, "", "", + "Specifies the maximum time (in ms) that a message will remain uncompacted. This is only applicable if the logs have compaction enabled") + AddStringFlag(c, doctl.ArgDatabaseTopicMaxMessageBytes, "", "", + "Specifies the largest record batch (in bytes) that can be sent to the server. 
This is calculated after compression, if compression is enabled") + AddBoolFlag(c, doctl.ArgDatabaseTopicMesssageDownConversionEnable, "", true, + "Specifies whether down-conversion of message formats is enabled to satisfy consumer requests") + AddStringFlag(c, doctl.ArgDatabaseTopicMessageFormatVersion, "", "", + "Specifies the message format version used by the broker to append messages to the logs. By setting a format version, all existing messages on disk must be smaller or equal to the specified version") + AddStringFlag(c, doctl.ArgDatabaseTopicMessageTimestampType, "", "", + "Specifies whether to use the create time or log append time as the timestamp on a message") + AddStringFlag(c, doctl.ArgDatabaseTopicMinCleanableDirtyRatio, "", "", + "Specifies the frequency of log compaction (if enabled) in relation to duplicates present in the logs. For example, 0.5 would mean at most half of the log could be duplicates before compaction would begin") + AddStringFlag(c, doctl.ArgDatabaseTopicMinCompactionLagMS, "", "", + "Specifies the minimum time (in ms) that a message will remain uncompacted. This is only applicable if the logs have compaction enabled") + AddStringFlag(c, doctl.ArgDatabaseTopicMinInsyncReplicas, "", "", + "Specifies the minimum number of replicas that must ACK a write for it to be considered successful") + AddBoolFlag(c, doctl.ArgDatabaseTopicPreallocate, "", false, + "Specifies whether a file should be preallocated on disk when creating a new log segment") + AddStringFlag(c, doctl.ArgDatabaseTopicRetentionBytes, "", "", + "Specifies the maximum size (in bytes) before deleting messages. '-1' indicates that there is no limit") + AddStringFlag(c, doctl.ArgDatabaseTopicRetentionMS, "", "", + "Specifies the maximum time (in ms) to store a message before deleting it. 
'-1' indicates that there is no limit") + AddStringFlag(c, doctl.ArgDatabaseTopicSegmentBytes, "", "", + "Specifies the maximum size (in bytes) of a single log file") + AddStringFlag(c, doctl.ArgDatabaseTopicSegmentJitterMS, "", "", + "Specifies the maximum time (in ms) for random jitter that is subtracted from the scheduled segment roll time to avoid thundering herd problems") + AddStringFlag(c, doctl.ArgDatabaseTopicSegmentMS, "", "", + "Specifies the maximum time (in ms) to wait to force a log roll if the segment file isn't full. After this period, the log will be forced to roll") + } + return cmd +} + func databaseFirewalls() *Command { cmd := &Command{ Command: &cobra.Command{ @@ -1895,20 +2246,20 @@ func databaseConfiguration() *Command { cmd := &Command{ Command: &cobra.Command{ Use: "configuration", - Aliases: []string{"cfg"}, + Aliases: []string{"cfg", "config"}, Short: "View the configuration of a database cluster given its ID and Engine", Long: "The subcommands of `doctl databases configuration` are used to view a database cluster's configuration.", }, } - getMySQLConfigurationLongDesc := ` - This will get a database cluster's configuration given its ID and Engine - ` - getMySQLCfgCommand := CmdBuilder( + getConfigurationLongDesc := "This will get a database cluster's configuration given its ID and Engine" + updateConfigurationLongDesc := "This will update a database cluster's configuration given its ID and Engine and Desired Configuration (as JSON string)" + + getDatabaseCfgCommand := CmdBuilder( cmd, RunDatabaseConfigurationGet, "get ", "Get a database cluster's configuration", - getMySQLConfigurationLongDesc, + getConfigurationLongDesc, Writer, aliasOpt("g"), displayerType(&displayers.MySQLConfiguration{}), @@ -1916,7 +2267,7 @@ func databaseConfiguration() *Command { displayerType(&displayers.RedisConfiguration{}), ) AddStringFlag( - getMySQLCfgCommand, + getDatabaseCfgCommand, doctl.ArgDatabaseEngine, "e", "", @@ -1924,6 +2275,32 @@ func 
databaseConfiguration() *Command { requiredOpt(), ) + updateDatabaseCfgCommand := CmdBuilder( + cmd, + RunDatabaseConfigurationUpdate, + "update ", + "Update a database cluster's configuration", + updateConfigurationLongDesc, + Writer, + aliasOpt("u"), + ) + AddStringFlag( + updateDatabaseCfgCommand, + doctl.ArgDatabaseEngine, + "e", + "", + "the engine of the database you want to update the configuration for", + requiredOpt(), + ) + AddStringFlag( + updateDatabaseCfgCommand, + doctl.ArgDatabaseConfigJson, + "", + "{}", + "the desired configuration of the database cluster you want to update", + requiredOpt(), + ) + return cmd } @@ -1984,3 +2361,51 @@ func RunDatabaseConfigurationGet(c *CmdConfig) error { } return nil } + +func RunDatabaseConfigurationUpdate(c *CmdConfig) error { + args := c.Args + if len(args) == 0 { + return doctl.NewMissingArgsErr(c.NS) + } + if len(args) > 1 { + return doctl.NewTooManyArgsErr(c.NS) + } + + engine, err := c.Doit.GetString(c.NS, doctl.ArgDatabaseEngine) + if err != nil { + return doctl.NewMissingArgsErr(c.NS) + } + + allowedEngines := map[string]any{ + "mysql": nil, + "pg": nil, + "redis": nil, + } + if _, ok := allowedEngines[engine]; !ok { + return fmt.Errorf("(%s) command: engine must be one of: 'pg', 'mysql', 'redis'", c.NS) + } + + configJson, err := c.Doit.GetString(c.NS, doctl.ArgDatabaseConfigJson) + if err != nil { + return doctl.NewMissingArgsErr(c.NS) + } + + dbId := args[0] + if engine == "mysql" { + err := c.Databases().UpdateMySQLConfiguration(dbId, configJson) + if err != nil { + return err + } + } else if engine == "pg" { + err := c.Databases().UpdatePostgreSQLConfiguration(dbId, configJson) + if err != nil { + return err + } + } else if engine == "redis" { + err := c.Databases().UpdateRedisConfiguration(dbId, configJson) + if err != nil { + return err + } + } + return nil +} diff --git a/commands/databases_test.go b/commands/databases_test.go index 2d9de8748..a6fae5600 100644 --- a/commands/databases_test.go +++ 
b/commands/databases_test.go @@ -66,6 +66,21 @@ var ( }, } + testKafkaDBCluster = do.Database{ + Database: &godo.Database{ + ID: "ea93928g-8se0-929e-m1ns-029daj2k3j12", + Name: "kafka-db-cluster", + RegionSlug: "nyc1", + EngineSlug: "kafka", + VersionSlug: "3.5", + NumNodes: 3, + SizeSlug: "db-s-2vcpu-4gb", + CreatedAt: time.Now(), + Status: "online", + Tags: []string{"testing"}, + }, + } + testDBBackUpCluster = do.Database{ Database: &godo.Database{ ID: "ea4652de-4fe0-11e9-b7ab-df1ef30eab9e", @@ -190,6 +205,36 @@ var ( RedisConfig: &godo.RedisConfig{}, } + topicReplicationFactor = uint32(3) + testKafkaTopic = do.DatabaseTopic{ + DatabaseTopic: &godo.DatabaseTopic{ + Name: "topic1", + State: "active", + Config: &godo.TopicConfig{ + CleanupPolicy: "delete", + }, + Partitions: []*godo.TopicPartition{ + { + Id: 0, + Size: 4096, + EarliestOffset: 0, + InSyncReplicas: 2, + }, + { + Id: 1, + Size: 4096, + EarliestOffset: 4, + InSyncReplicas: 2, + }, + }, + ReplicationFactor: &topicReplicationFactor, + }, + } + + testKafkaTopics = do.DatabaseTopics{ + testKafkaTopic, + } + errTest = errors.New("error") ) @@ -215,6 +260,7 @@ func TestDatabasesCommand(t *testing.T) { "db", "sql-mode", "configuration", + "topics", ) } @@ -288,7 +334,20 @@ func TestDatabaseOptionsCommand(t *testing.T) { func TestDatabaseConfigurationCommand(t *testing.T) { cmd := databaseConfiguration() assert.NotNil(t, cmd) - assertCommandNames(t, cmd, "get") + assertCommandNames(t, cmd, "get", "update") +} + +func TestDatabaseKafkaTopicCommand(t *testing.T) { + cmd := databaseTopic() + assert.NotNil(t, cmd) + assertCommandNames(t, cmd, + "get", + "list", + "delete", + "create", + "update", + "partitions", + ) } func TestDatabasesGet(t *testing.T) { @@ -560,6 +619,155 @@ func TestDatabaseListBackups(t *testing.T) { }) } +func TestDatabaseListKafkaTopics(t *testing.T) { + // Success + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + 
tm.databases.EXPECT().ListTopics(testKafkaDBCluster.ID).Return(testKafkaTopics, nil) + config.Args = append(config.Args, testKafkaDBCluster.ID) + + err := RunDatabaseTopicList(config) + assert.NoError(t, err) + }) + + // Error + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + tm.databases.EXPECT().ListTopics(testKafkaDBCluster.ID).Return(nil, errTest) + config.Args = append(config.Args, testKafkaDBCluster.ID) + + err := RunDatabaseTopicList(config) + assert.EqualError(t, err, errTest.Error()) + }) +} + +func TestDatabaseGetKafkaTopic(t *testing.T) { + // Success + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + tm.databases.EXPECT().GetTopic(testKafkaDBCluster.ID, testKafkaTopic.Name).Return(&testKafkaTopic, nil) + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + + err := RunDatabaseTopicGet(config) + assert.NoError(t, err) + }) + + // Error + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + tm.databases.EXPECT().GetTopic(testKafkaDBCluster.ID, testKafkaTopic.Name).Return(nil, errTest) + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + + err := RunDatabaseTopicGet(config) + assert.EqualError(t, err, errTest.Error()) + }) +} + +func TestDatabaseCreateKafkaTopic(t *testing.T) { + // Success - only topic name + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + createReq := &godo.DatabaseCreateTopicRequest{ + Name: testKafkaTopic.Name, + Config: &godo.TopicConfig{}, + } + tm.databases.EXPECT().CreateTopic(testKafkaDBCluster.ID, createReq).Return(&testKafkaTopic, nil) + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + + err := RunDatabaseTopicCreate(config) + assert.NoError(t, err) + }) + // Success - with additional config + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + pc := uint32(len(testKafkaTopic.Partitions)) + createReq := &godo.DatabaseCreateTopicRequest{ + Name: testKafkaTopic.Name, + ReplicationFactor: 
testKafkaTopic.ReplicationFactor, + PartitionCount: &pc, + Config: &godo.TopicConfig{ + CleanupPolicy: testKafkaTopic.Config.CleanupPolicy, + }, + } + tm.databases.EXPECT().CreateTopic(testKafkaDBCluster.ID, createReq).Return(&testKafkaTopic, nil) + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + config.Doit.Set(config.NS, doctl.ArgDatabaseTopicPartitionCount, pc) + config.Doit.Set(config.NS, doctl.ArgDatabaseTopicReplicationFactor, testKafkaTopic.ReplicationFactor) + config.Doit.Set(config.NS, doctl.ArgDatabaseTopicCleanupPolicy, testKafkaTopic.Config.CleanupPolicy) + + err := RunDatabaseTopicCreate(config) + assert.NoError(t, err) + }) + // Error + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + tm.databases.EXPECT().CreateTopic(testKafkaDBCluster.ID, gomock.AssignableToTypeOf(&godo.DatabaseCreateTopicRequest{})).Return(nil, errTest) + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + + err := RunDatabaseTopicCreate(config) + assert.EqualError(t, err, errTest.Error()) + }) +} + +func TestDatabaseUpdateKafkaTopic(t *testing.T) { + // Success - only partition count + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + currPC := uint32(len(testKafkaTopic.Partitions)) + newPC := currPC + 1 + updateReq := &godo.DatabaseUpdateTopicRequest{ + PartitionCount: &newPC, + Config: &godo.TopicConfig{}, + } + tm.databases.EXPECT().UpdateTopic(testKafkaDBCluster.ID, testKafkaTopic.Name, updateReq).Return(nil) + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + config.Doit.Set(config.NS, doctl.ArgDatabaseTopicPartitionCount, newPC) + + err := RunDatabaseTopicUpdate(config) + assert.NoError(t, err) + }) + // Success - with additional config + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + currPC := uint32(len(testKafkaTopic.Partitions)) + newPC := currPC + 1 + updateReq := &godo.DatabaseUpdateTopicRequest{ + PartitionCount: &newPC, + Config: &godo.TopicConfig{ 
+ CleanupPolicy: testKafkaTopic.Config.CleanupPolicy, + }, + } + tm.databases.EXPECT().UpdateTopic(testKafkaDBCluster.ID, testKafkaTopic.Name, updateReq).Return(nil) + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + config.Doit.Set(config.NS, doctl.ArgDatabaseTopicPartitionCount, newPC) + config.Doit.Set(config.NS, doctl.ArgDatabaseTopicCleanupPolicy, testKafkaTopic.Config.CleanupPolicy) + + err := RunDatabaseTopicUpdate(config) + assert.NoError(t, err) + }) + // Error + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + tm.databases.EXPECT().UpdateTopic(testKafkaDBCluster.ID, testKafkaTopic.Name, gomock.AssignableToTypeOf(&godo.DatabaseUpdateTopicRequest{})).Return(errTest) + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + + err := RunDatabaseTopicUpdate(config) + assert.EqualError(t, err, errTest.Error()) + }) +} + +func TestDatabaseDeleteKafkaTopic(t *testing.T) { + // Successful + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + tm.databases.EXPECT().DeleteTopic(testKafkaDBCluster.ID, testKafkaTopic.Name).Return(nil) + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + config.Doit.Set(config.NS, doctl.ArgForce, "true") + + err := RunDatabaseTopicDelete(config) + assert.NoError(t, err) + }) + // Error + withTestClient(t, func(config *CmdConfig, tm *tcMocks) { + tm.databases.EXPECT().DeleteTopic(testKafkaDBCluster.ID, testKafkaTopic.Name).Return(errTest) + + config.Args = append(config.Args, testKafkaDBCluster.ID, testKafkaTopic.Name) + config.Doit.Set(config.NS, doctl.ArgForce, "true") + + err := RunDatabaseTopicDelete(config) + assert.EqualError(t, err, errTest.Error()) + }) +} + func TestDatabaseConnectionGet(t *testing.T) { // Success withTestClient(t, func(config *CmdConfig, tm *tcMocks) { @@ -1207,7 +1415,7 @@ func TestDatabaseGetSQLModes(t *testing.T) { } func TestDatabaseSetSQLModes(t *testing.T) { - testSQLModesInterface := make([]interface{}, 
0, len(testSQLModes)) + testSQLModesInterface := make([]any, 0, len(testSQLModes)) for _, sqlMode := range testSQLModes { testSQLModesInterface = append(testSQLModesInterface, sqlMode) } diff --git a/commands/displayers/1_click.go b/commands/displayers/1_click.go index 9cde4f4ee..6e5c9bfa6 100644 --- a/commands/displayers/1_click.go +++ b/commands/displayers/1_click.go @@ -48,11 +48,11 @@ func (oc *OneClick) ColMap() map[string]string { } // KV maps the values of a 1-click to an output -func (oc *OneClick) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(oc.OneClicks)) +func (oc *OneClick) KV() []map[string]any { + out := make([]map[string]any, 0, len(oc.OneClicks)) for _, oneClick := range oc.OneClicks { - o := map[string]interface{}{ + o := map[string]any{ "SLUG": oneClick.Slug, "TYPE": oneClick.Type, } diff --git a/commands/displayers/account.go b/commands/displayers/account.go index f274fcf0a..f6c80bba3 100644 --- a/commands/displayers/account.go +++ b/commands/displayers/account.go @@ -42,8 +42,8 @@ func (a *Account) ColMap() map[string]string { } } -func (a *Account) KV() []map[string]interface{} { - x := map[string]interface{}{ +func (a *Account) KV() []map[string]any { + x := map[string]any{ "Email": a.Email, "DropletLimit": a.DropletLimit, "EmailVerified": a.EmailVerified, "UUID": a.UUID, "Status": a.Status, @@ -53,5 +53,5 @@ func (a *Account) KV() []map[string]interface{} { x["TeamUUID"] = a.Team.UUID } - return []map[string]interface{}{x} + return []map[string]any{x} } diff --git a/commands/displayers/action.go b/commands/displayers/action.go index 92dce15f7..296410b6c 100644 --- a/commands/displayers/action.go +++ b/commands/displayers/action.go @@ -43,15 +43,15 @@ func (a *Action) ColMap() map[string]string { } } -func (a *Action) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(a.Actions)) +func (a *Action) KV() []map[string]any { + out := make([]map[string]any, 0, len(a.Actions)) for _, x := 
range a.Actions { region := "" if x.Region != nil { region = x.Region.Slug } - o := map[string]interface{}{ + o := map[string]any{ "ID": x.ID, "Status": x.Status, "Type": x.Type, "StartedAt": x.StartedAt, "CompletedAt": x.CompletedAt, "ResourceID": x.ResourceID, "ResourceType": x.ResourceType, diff --git a/commands/displayers/activations.go b/commands/displayers/activations.go index 07a6d826e..f38461df1 100644 --- a/commands/displayers/activations.go +++ b/commands/displayers/activations.go @@ -51,11 +51,11 @@ func (a *Activation) JSON(out io.Writer) error { } // KV implements Displayable -func (a *Activation) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(a.Activations)) +func (a *Activation) KV() []map[string]any { + out := make([]map[string]any, 0, len(a.Activations)) for _, actv := range a.Activations { - o := map[string]interface{}{ + o := map[string]any{ "Datetime": time.UnixMilli(actv.Start).Format("01/02 03:04:05"), "Status": GetActivationStatus(actv.StatusCode), "Kind": getActivationAnnotationValue(actv, "kind"), @@ -117,7 +117,7 @@ func GetActivationPackageName(a whisk.Activation) string { return "" } -func getActivationAnnotationValue(a whisk.Activation, key string) interface{} { +func getActivationAnnotationValue(a whisk.Activation, key string) any { if a.Annotations == nil { return nil } diff --git a/commands/displayers/apps.go b/commands/displayers/apps.go index db6953abf..5a86b39ab 100644 --- a/commands/displayers/apps.go +++ b/commands/displayers/apps.go @@ -38,8 +38,8 @@ func (a Apps) ColMap() map[string]string { } } -func (a Apps) KV() []map[string]interface{} { - out := make([]map[string]interface{}, len(a)) +func (a Apps) KV() []map[string]any { + out := make([]map[string]any, len(a)) for i, app := range a { var ( @@ -55,7 +55,7 @@ func (a Apps) KV() []map[string]interface{} { inProgressDeploymentID = app.InProgressDeployment.ID } - out[i] = map[string]interface{}{ + out[i] = map[string]any{ "ID": app.ID, 
"Spec.Name": app.Spec.Name, "DefaultIngress": app.DefaultIngress, @@ -98,8 +98,8 @@ func (d Deployments) ColMap() map[string]string { } } -func (d Deployments) KV() []map[string]interface{} { - out := make([]map[string]interface{}, len(d)) +func (d Deployments) KV() []map[string]any { + out := make([]map[string]any, len(d)) for i, deployment := range d { var progress string @@ -111,7 +111,7 @@ func (d Deployments) KV() []map[string]interface{} { } } - out[i] = map[string]interface{}{ + out[i] = map[string]any{ "ID": deployment.ID, "Cause": deployment.Cause, "Progress": progress, @@ -156,11 +156,11 @@ func (r AppRegions) ColMap() map[string]string { } } -func (r AppRegions) KV() []map[string]interface{} { - out := make([]map[string]interface{}, len(r)) +func (r AppRegions) KV() []map[string]any { + out := make([]map[string]any, len(r)) for i, region := range r { - out[i] = map[string]interface{}{ + out[i] = map[string]any{ "Slug": region.Slug, "Label": region.Label, "Continent": region.Continent, @@ -201,12 +201,12 @@ func (t AppTiers) ColMap() map[string]string { } } -func (t AppTiers) KV() []map[string]interface{} { - out := make([]map[string]interface{}, len(t)) +func (t AppTiers) KV() []map[string]any { + out := make([]map[string]any, len(t)) for i, tier := range t { egressBandwidth, _ := strconv.ParseUint(tier.EgressBandwidthBytes, 10, 64) - out[i] = map[string]interface{}{ + out[i] = map[string]any{ "Name": tier.Name, "Slug": tier.Slug, "EgressBandwidthBytes": BytesToHumanReadableUnitBinary(egressBandwidth), @@ -252,8 +252,8 @@ func (is AppInstanceSizes) ColMap() map[string]string { } } -func (is AppInstanceSizes) KV() []map[string]interface{} { - out := make([]map[string]interface{}, len(is)) +func (is AppInstanceSizes) KV() []map[string]any { + out := make([]map[string]any, len(is)) for i, instanceSize := range is { memory, _ := strconv.ParseUint(instanceSize.MemoryBytes, 10, 64) @@ -269,7 +269,7 @@ func (is AppInstanceSizes) KV() []map[string]interface{} { 
upgradeDowngradePath = upgradeDowngradePath + " -> " + instanceSize.TierUpgradeTo } - out[i] = map[string]interface{}{ + out[i] = map[string]any{ "Name": instanceSize.Name, "Slug": instanceSize.Slug, "CPUs": cpus, @@ -327,7 +327,7 @@ func (r AppProposeResponse) ColMap() map[string]string { } } -func (r AppProposeResponse) KV() []map[string]interface{} { +func (r AppProposeResponse) KV() []map[string]any { existingStatic, _ := strconv.ParseInt(r.Res.ExistingStaticApps, 10, 64) maxFreeStatic, _ := strconv.ParseInt(r.Res.MaxFreeStaticApps, 10, 64) var paidStatic int64 @@ -352,7 +352,7 @@ func (r AppProposeResponse) KV() []map[string]interface{} { upgradeCost = fmt.Sprintf("%0.2f", r.Res.AppTierUpgradeCost) } - out := map[string]interface{}{ + out := map[string]any{ "AppNameAvailable": boolToYesNo(r.Res.AppNameAvailable), "AppIsStatic": boolToYesNo(r.Res.AppIsStatic), "StaticApps": staticApps, @@ -365,7 +365,7 @@ func (r AppProposeResponse) KV() []map[string]interface{} { out["AppNameSuggestion"] = r.Res.AppNameSuggestion } - return []map[string]interface{}{out} + return []map[string]any{out} } func (r AppProposeResponse) JSON(w io.Writer) error { @@ -402,8 +402,8 @@ func (a AppAlerts) ColMap() map[string]string { } } -func (a AppAlerts) KV() []map[string]interface{} { - out := make([]map[string]interface{}, len(a)) +func (a AppAlerts) KV() []map[string]any { + out := make([]map[string]any, len(a)) for i, alert := range a { var trigger string @@ -439,7 +439,7 @@ func (a AppAlerts) KV() []map[string]interface{} { trigger = "Unknown" } - out[i] = map[string]interface{}{ + out[i] = map[string]any{ "ID": alert.ID, "Spec.Rule": alert.Spec.Rule, "Trigger": trigger, @@ -480,11 +480,11 @@ func (b Buildpacks) ColMap() map[string]string { } } -func (b Buildpacks) KV() []map[string]interface{} { - out := make([]map[string]interface{}, len(b)) +func (b Buildpacks) KV() []map[string]any { + out := make([]map[string]any, len(b)) for i, bp := range b { - out[i] = 
map[string]interface{}{ + out[i] = map[string]any{ "Name": bp.Name, "ID": bp.ID, "Version": bp.Version, diff --git a/commands/displayers/balance.go b/commands/displayers/balance.go index 7bd598f91..643568f11 100644 --- a/commands/displayers/balance.go +++ b/commands/displayers/balance.go @@ -45,13 +45,13 @@ func (a *Balance) ColMap() map[string]string { } } -func (a *Balance) KV() []map[string]interface{} { - x := map[string]interface{}{ +func (a *Balance) KV() []map[string]any { + x := map[string]any{ "MonthToDateBalance": a.MonthToDateBalance, "AccountBalance": a.AccountBalance, "MonthToDateUsage": a.MonthToDateUsage, "GeneratedAt": a.GeneratedAt.Format(time.RFC3339), } - return []map[string]interface{}{x} + return []map[string]any{x} } diff --git a/commands/displayers/billing_history.go b/commands/displayers/billing_history.go index 3bb2dce1f..b83a63941 100644 --- a/commands/displayers/billing_history.go +++ b/commands/displayers/billing_history.go @@ -47,7 +47,7 @@ func (i *BillingHistory) ColMap() map[string]string { } } -func (i *BillingHistory) KV() []map[string]interface{} { +func (i *BillingHistory) KV() []map[string]any { fromStringP := func(s *string) string { if s == nil { return "" @@ -55,9 +55,9 @@ func (i *BillingHistory) KV() []map[string]interface{} { return *s } history := i.BillingHistory.BillingHistory.BillingHistory - out := make([]map[string]interface{}, 0, len(history)) + out := make([]map[string]any, 0, len(history)) for _, ii := range history { - x := map[string]interface{}{ + x := map[string]any{ "Date": ii.Date.Format(time.RFC3339), "Type": ii.Type, "Description": ii.Description, diff --git a/commands/displayers/cdn.go b/commands/displayers/cdn.go index 94a53d11b..4242d56c7 100644 --- a/commands/displayers/cdn.go +++ b/commands/displayers/cdn.go @@ -47,11 +47,11 @@ func (c *CDN) ColMap() map[string]string { } } -func (c *CDN) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(c.CDNs)) +func (c *CDN) KV() 
[]map[string]any { + out := make([]map[string]any, 0, len(c.CDNs)) for _, cdn := range c.CDNs { - m := map[string]interface{}{ + m := map[string]any{ "ID": cdn.ID, "Origin": cdn.Origin, "Endpoint": cdn.Endpoint, diff --git a/commands/displayers/certificate.go b/commands/displayers/certificate.go index 960c3b200..32492efb6 100644 --- a/commands/displayers/certificate.go +++ b/commands/displayers/certificate.go @@ -56,11 +56,11 @@ func (c *Certificate) ColMap() map[string]string { } } -func (c *Certificate) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(c.Certificates)) +func (c *Certificate) KV() []map[string]any { + out := make([]map[string]any, 0, len(c.Certificates)) for _, c := range c.Certificates { - o := map[string]interface{}{ + o := map[string]any{ "ID": c.ID, "Name": c.Name, "DNSNames": strings.Join(c.DNSNames, ","), diff --git a/commands/displayers/database.go b/commands/displayers/database.go index 0653d2c4a..f7c18145a 100644 --- a/commands/displayers/database.go +++ b/commands/displayers/database.go @@ -94,11 +94,11 @@ func (d *Databases) ColMap() map[string]string { } } -func (d *Databases) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(d.Databases)) +func (d *Databases) KV() []map[string]any { + out := make([]map[string]any, 0, len(d.Databases)) for _, db := range d.Databases { - o := map[string]interface{}{ + o := map[string]any{ "ID": db.ID, "Name": db.Name, "Engine": db.EngineSlug, @@ -141,11 +141,11 @@ func (db *DatabaseBackups) ColMap() map[string]string { } } -func (db *DatabaseBackups) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(db.DatabaseBackups)) +func (db *DatabaseBackups) KV() []map[string]any { + out := make([]map[string]any, 0, len(db.DatabaseBackups)) for _, b := range db.DatabaseBackups { - o := map[string]interface{}{ + o := map[string]any{ "Size": b.SizeGigabytes, "Created": b.CreatedAt, } @@ -181,11 +181,11 @@ func (du *DatabaseUsers) 
ColMap() map[string]string { } } -func (du *DatabaseUsers) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(du.DatabaseUsers)) +func (du *DatabaseUsers) KV() []map[string]any { + out := make([]map[string]any, 0, len(du.DatabaseUsers)) for _, u := range du.DatabaseUsers { - o := map[string]interface{}{ + o := map[string]any{ "Role": u.Role, "Name": u.Name, "Password": u.Password, @@ -230,9 +230,9 @@ func (dc *DatabaseConnection) ColMap() map[string]string { } } -func (dc *DatabaseConnection) KV() []map[string]interface{} { +func (dc *DatabaseConnection) KV() []map[string]any { c := dc.DatabaseConnection - o := map[string]interface{}{ + o := map[string]any{ "URI": c.URI, "Database": c.Database, "Host": c.Host, @@ -242,7 +242,7 @@ func (dc *DatabaseConnection) KV() []map[string]interface{} { "SSL": c.SSL, } - return []map[string]interface{}{o} + return []map[string]any{o} } type DatabaseReplicas struct { @@ -296,11 +296,11 @@ func (dr *DatabaseReplicas) ColMap() map[string]string { } } -func (dr *DatabaseReplicas) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(dr.DatabaseReplicas)) +func (dr *DatabaseReplicas) KV() []map[string]any { + out := make([]map[string]any, 0, len(dr.DatabaseReplicas)) for _, r := range dr.DatabaseReplicas { - o := map[string]interface{}{ + o := map[string]any{ "Name": r.Name, "ID": r.ID, "Region": r.Region, @@ -336,7 +336,7 @@ func (do *DatabaseOptions) ColMap() map[string]string { } } -func (do *DatabaseOptions) KV() []map[string]interface{} { +func (do *DatabaseOptions) KV() []map[string]any { engines := make([]string, 0) nonEmptyOptionsFn := func(opt godo.DatabaseEngineOptions) bool { return len(opt.Layouts) > 0 || len(opt.Regions) > 0 || len(opt.Versions) > 0 @@ -357,9 +357,9 @@ func (do *DatabaseOptions) KV() []map[string]interface{} { engines = append(engines, "kafka") } - out := make([]map[string]interface{}, 0, len(engines)) + out := make([]map[string]any, 0, len(engines)) 
for _, eng := range engines { - o := map[string]interface{}{ + o := map[string]any{ "Engine": eng, } out = append(out, o) @@ -391,8 +391,8 @@ func (dbr *DatabaseRegionOptions) ColMap() map[string]string { } } -func (dbr *DatabaseRegionOptions) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0) +func (dbr *DatabaseRegionOptions) KV() []map[string]any { + out := make([]map[string]any, 0) regionEngineMap := make(map[string][]string, 0) for eng, regions := range dbr.RegionMap { for _, r := range regions { @@ -400,7 +400,7 @@ func (dbr *DatabaseRegionOptions) KV() []map[string]interface{} { } } for r, engines := range regionEngineMap { - o := map[string]interface{}{ + o := map[string]any{ "Region": r, "Engines": "[" + strings.Join(engines, ",") + "]", } @@ -433,10 +433,10 @@ func (dbv *DatabaseVersionOptions) ColMap() map[string]string { } } -func (dbv *DatabaseVersionOptions) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0) +func (dbv *DatabaseVersionOptions) KV() []map[string]any { + out := make([]map[string]any, 0) for eng, versions := range dbv.VersionMap { - o := map[string]interface{}{ + o := map[string]any{ "Engine": eng, "Versions": "[" + strings.Join(versions, ",") + "]", } @@ -469,8 +469,8 @@ func (dbl *DatabaseLayoutOptions) ColMap() map[string]string { } } -func (dbl *DatabaseLayoutOptions) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0) +func (dbl *DatabaseLayoutOptions) KV() []map[string]any { + out := make([]map[string]any, 0) slugNodeMap := make(map[string][]string, 0) for _, layout := range dbl.Layouts { for _, s := range layout.Sizes { @@ -485,7 +485,7 @@ func (dbl *DatabaseLayoutOptions) KV() []map[string]interface{} { sort.Strings(keys) for _, k := range keys { - o := map[string]interface{}{ + o := map[string]any{ "Slug": k, "NodeNums": "[" + strings.Join(slugNodeMap[k], ",") + "]", } @@ -526,11 +526,11 @@ func (dp *DatabasePools) ColMap() map[string]string { } } -func (dp 
*DatabasePools) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(dp.DatabasePools)) +func (dp *DatabasePools) KV() []map[string]any { + out := make([]map[string]any, 0, len(dp.DatabasePools)) for _, p := range dp.DatabasePools { - o := map[string]interface{}{ + o := map[string]any{ "User": p.User, "Name": p.Name, "Size": p.Size, @@ -570,15 +570,15 @@ func (dmw *DatabaseMaintenanceWindow) ColMap() map[string]string { } } -func (dmw *DatabaseMaintenanceWindow) KV() []map[string]interface{} { +func (dmw *DatabaseMaintenanceWindow) KV() []map[string]any { mw := dmw.DatabaseMaintenanceWindow - o := map[string]interface{}{ + o := map[string]any{ "Day": mw.Day, "Hour": mw.Hour, "Pending": mw.Pending, } - return []map[string]interface{}{o} + return []map[string]any{o} } type DatabaseDBs struct { @@ -603,11 +603,11 @@ func (db *DatabaseDBs) ColMap() map[string]string { } } -func (db *DatabaseDBs) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(db.DatabaseDBs)) +func (db *DatabaseDBs) KV() []map[string]any { + out := make([]map[string]any, 0, len(db.DatabaseDBs)) for _, p := range db.DatabaseDBs { - o := map[string]interface{}{ + o := map[string]any{ "Name": p.Name, } out = append(out, o) @@ -638,11 +638,11 @@ func (dsm *DatabaseSQLModes) ColMap() map[string]string { } } -func (dsm *DatabaseSQLModes) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(dsm.DatabaseSQLModes)) +func (dsm *DatabaseSQLModes) KV() []map[string]any { + out := make([]map[string]any, 0, len(dsm.DatabaseSQLModes)) for _, p := range dsm.DatabaseSQLModes { - o := map[string]interface{}{ + o := map[string]any{ "Name": p, } out = append(out, o) @@ -680,11 +680,11 @@ func (dr *DatabaseFirewallRules) ColMap() map[string]string { } } -func (dr *DatabaseFirewallRules) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(dr.DatabaseFirewallRules)) +func (dr *DatabaseFirewallRules) KV() 
[]map[string]any { + out := make([]map[string]any, 0, len(dr.DatabaseFirewallRules)) for _, r := range dr.DatabaseFirewallRules { - o := map[string]interface{}{ + o := map[string]any{ "UUID": r.UUID, "ClusterUUID": r.ClusterUUID, "Type": r.Type, @@ -696,6 +696,279 @@ func (dr *DatabaseFirewallRules) KV() []map[string]interface{} { return out } +type DatabaseKafkaTopics struct { + DatabaseTopics do.DatabaseTopics +} + +var _ Displayable = &DatabaseKafkaTopics{} + +func (dt *DatabaseKafkaTopics) JSON(out io.Writer) error { + return writeJSON(dt.DatabaseTopics, out) +} + +func (dt *DatabaseKafkaTopics) Cols() []string { + return []string{ + "Name", + "State", + "ReplicationFactor", + } +} + +func (dt *DatabaseKafkaTopics) ColMap() map[string]string { + + return map[string]string{ + "Name": "Name", + "State": "State", + "ReplicationFactor": "ReplicationFactor", + } +} + +func (dt *DatabaseKafkaTopics) KV() []map[string]any { + out := make([]map[string]any, 0, len(dt.DatabaseTopics)) + + for _, t := range dt.DatabaseTopics { + o := map[string]any{ + "Name": t.Name, + "State": t.State, + "ReplicationFactor": *t.ReplicationFactor, + } + out = append(out, o) + } + + return out +} + +type DatabaseKafkaTopicPartitions struct { + DatabaseTopicPartitions []*godo.TopicPartition +} + +var _ Displayable = &DatabaseKafkaTopicPartitions{} + +func (dp *DatabaseKafkaTopicPartitions) JSON(out io.Writer) error { + return writeJSON(dp.DatabaseTopicPartitions, out) +} + +func (dp *DatabaseKafkaTopicPartitions) Cols() []string { + return []string{ + "Id", + "InSyncReplicas", + "EarliestOffset", + "Size", + } +} + +func (dp *DatabaseKafkaTopicPartitions) ColMap() map[string]string { + + return map[string]string{ + "Id": "Id", + "InSyncReplicas": "InSyncReplicas", + "EarliestOffset": "EarliestOffset", + "Size": "Size", + } +} + +func (dp *DatabaseKafkaTopicPartitions) KV() []map[string]any { + out := make([]map[string]any, 0, len(dp.DatabaseTopicPartitions)) + + for _, p := range 
dp.DatabaseTopicPartitions { + o := map[string]any{ + "Id": p.Id, + "InSyncReplicas": p.InSyncReplicas, + "EarliestOffset": p.EarliestOffset, + "Size": p.Size, + } + out = append(out, o) + } + + return out +} + +type DatabaseKafkaTopic struct { + DatabaseTopic do.DatabaseTopic +} + +var _ Displayable = &DatabaseKafkaTopic{} + +func (dt *DatabaseKafkaTopic) JSON(out io.Writer) error { + return writeJSON(dt.DatabaseTopic, out) +} + +func (dt *DatabaseKafkaTopic) Cols() []string { + return []string{ + "key", + "value", + } +} + +func (dt *DatabaseKafkaTopic) ColMap() map[string]string { + + return map[string]string{ + "key": "key", + "value": "value", + } +} + +func (dt *DatabaseKafkaTopic) KV() []map[string]any { + t := dt.DatabaseTopic + o := []map[string]any{ + { + "key": "Name", + "value": t.Name, + }, + { + "key": "State", + "value": t.State, + }, + { + "key": "ReplicationFactor", + "value": *t.ReplicationFactor, + }, + { + "key": "PartitionCount", + "value": len(t.Partitions), + }, + } + + if t.Config != nil { + cfg := make([]map[string]any, 0) + if t.Config.CleanupPolicy != "" { + cfg = append(cfg, map[string]any{ + "key": "CleanupPolicy", + "value": t.Config.CleanupPolicy, + }) + } + if t.Config.CompressionType != "" { + cfg = append(cfg, map[string]any{ + "key": "CompressionType", + "value": t.Config.CompressionType, + }) + } + if t.Config.DeleteRetentionMS != nil { + cfg = append(cfg, map[string]any{ + "key": "DeleteRetentionMS", + "value": *t.Config.DeleteRetentionMS, + }) + } + if t.Config.FileDeleteDelayMS != nil { + cfg = append(cfg, map[string]any{ + "key": "FileDeleteDelayMS", + "value": *t.Config.FileDeleteDelayMS, + }) + } + if t.Config.FlushMessages != nil { + cfg = append(cfg, map[string]any{ + "key": "FlushMessages", + "value": *t.Config.FlushMessages, + }) + } + if t.Config.FlushMS != nil { + cfg = append(cfg, map[string]any{ + "key": "FlushMS", + "value": *t.Config.FlushMS, + }) + } + if t.Config.IndexIntervalBytes != nil { + cfg = append(cfg, 
map[string]any{ + "key": "IndexIntervalBytes", + "value": *t.Config.IndexIntervalBytes, + }) + } + if t.Config.MaxCompactionLagMS != nil { + cfg = append(cfg, map[string]any{ + "key": "MaxCompactionLagMS", + "value": *t.Config.MaxCompactionLagMS, + }) + } + if t.Config.MessageDownConversionEnable != nil { + cfg = append(cfg, map[string]any{ + "key": "MessageDownConversionEnable", + "value": *t.Config.MessageDownConversionEnable, + }) + } + if t.Config.MessageFormatVersion != "" { + cfg = append(cfg, map[string]any{ + "key": "MessageFormatVersion", + "value": t.Config.MessageFormatVersion, + }) + } + if t.Config.MessageTimestampDifferenceMaxMS != nil { + cfg = append(cfg, map[string]any{ + "key": "MessageTimestampDifferentMaxMS", + "value": *t.Config.MessageTimestampDifferenceMaxMS, + }) + } + if t.Config.MessageTimestampType != "" { + cfg = append(cfg, map[string]any{ + "key": "MessageTimestampType", + "value": t.Config.MessageTimestampType, + }) + } + if t.Config.MinCleanableDirtyRatio != nil { + cfg = append(cfg, map[string]any{ + "key": "MinCleanableDirtyRatio", + "value": *t.Config.MinCleanableDirtyRatio, + }) + } + if t.Config.MinCompactionLagMS != nil { + cfg = append(cfg, map[string]any{ + "key": "MinCompactionLagMS", + "value": *t.Config.MinCompactionLagMS, + }) + } + if t.Config.MinInsyncReplicas != nil { + cfg = append(cfg, map[string]any{ + "key": "MinInsyncReplicas", + "value": *t.Config.MinInsyncReplicas, + }) + } + if t.Config.Preallocate != nil { + cfg = append(cfg, map[string]any{ + "key": "Preallocate", + "value": *t.Config.Preallocate, + }) + } + if t.Config.RetentionBytes != nil { + cfg = append(cfg, map[string]any{ + "key": "RetentionBytes", + "value": *t.Config.RetentionBytes, + }) + } + if t.Config.RetentionMS != nil { + cfg = append(cfg, map[string]any{ + "key": "RetentionMS", + "value": *t.Config.RetentionMS, + }) + } + if t.Config.SegmentBytes != nil { + cfg = append(cfg, map[string]any{ + "key": "SegmentBytes", + "value": 
*t.Config.SegmentBytes, + }) + } + if t.Config.SegmentIndexBytes != nil { + cfg = append(cfg, map[string]any{ + "key": "SegmentIndexBytes", + "value": *t.Config.SegmentIndexBytes, + }) + } + if t.Config.SegmentJitterMS != nil { + cfg = append(cfg, map[string]any{ + "key": "SegmentJitterMS", + "value": *t.Config.SegmentJitterMS, + }) + } + if t.Config.SegmentMS != nil { + cfg = append(cfg, map[string]any{ + "key": "SegmentMS", + "value": *t.Config.SegmentMS, + }) + } + o = append(o, cfg...) + } + + return o +} + type MySQLConfiguration struct { MySQLConfiguration do.MySQLConfig } @@ -720,167 +993,167 @@ func (dc *MySQLConfiguration) ColMap() map[string]string { } } -func (dc *MySQLConfiguration) KV() []map[string]interface{} { +func (dc *MySQLConfiguration) KV() []map[string]any { c := dc.MySQLConfiguration - o := []map[string]interface{}{} + o := []map[string]any{} if c.ConnectTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "ConnectTimeout", "value": *c.ConnectTimeout, }) } if c.DefaultTimeZone != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "DefaultTimeZone", "value": *c.DefaultTimeZone, }) } if c.InnodbLogBufferSize != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InnodbLogBufferSize", "value": *c.InnodbLogBufferSize, }) } if c.InnodbOnlineAlterLogMaxSize != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InnodbOnlineAlterLogMaxSize", "value": *c.InnodbOnlineAlterLogMaxSize, }) } if c.InnodbLockWaitTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InnodbLockWaitTimeout", "value": *c.InnodbLockWaitTimeout, }) } if c.InteractiveTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InteractiveTimeout", "value": *c.InteractiveTimeout, }) } if c.MaxAllowedPacket != nil { - o = append(o, map[string]interface{}{ + o = 
append(o, map[string]any{ "key": "MaxAllowedPacket", "value": *c.MaxAllowedPacket, }) } if c.NetReadTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "NetReadTimeout", "value": *c.NetReadTimeout, }) } if c.SortBufferSize != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "SortBufferSize", "value": *c.SortBufferSize, }) } if c.SQLMode != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "SQLMode", "value": *c.SQLMode, }) } if c.SQLRequirePrimaryKey != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "SQLRequirePrimaryKey", "value": *c.SQLRequirePrimaryKey, }) } if c.WaitTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "WaitTimeout", "value": *c.WaitTimeout, }) } if c.NetWriteTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "NetWriteTimeout", "value": *c.NetWriteTimeout, }) } if c.GroupConcatMaxLen != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "GroupConcatMaxLen", "value": *c.GroupConcatMaxLen, }) } if c.InformationSchemaStatsExpiry != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InformationSchemaStatsExpiry", "value": *c.InformationSchemaStatsExpiry, }) } if c.InnodbFtMinTokenSize != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InnodbFtMinTokenSize", "value": *c.InnodbFtMinTokenSize, }) } if c.InnodbFtServerStopwordTable != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InnodbFtServerStopwordTable", "value": *c.InnodbFtServerStopwordTable, }) } if c.InnodbPrintAllDeadlocks != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InnodbPrintAllDeadlocks", "value": *c.InnodbPrintAllDeadlocks, }) } if c.InnodbRollbackOnTimeout != nil 
{ - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InnodbRollbackOnTimeout", "value": *c.InnodbRollbackOnTimeout, }) } if c.InternalTmpMemStorageEngine != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "InternalTmpMemStorageEngine", "value": *c.InternalTmpMemStorageEngine, }) } if c.MaxHeapTableSize != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxHeapTableSize", "value": *c.MaxHeapTableSize, }) } if c.TmpTableSize != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "TmpTableSize", "value": *c.TmpTableSize, }) } if c.SlowQueryLog != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "SlowQueryLog", "value": *c.SlowQueryLog, }) } if c.LongQueryTime != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "LongQueryTime", "value": *c.LongQueryTime, }) } if c.BackupHour != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "BackupHour", "value": *c.BackupHour, }) } if c.BackupMinute != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "BackupMinute", "value": *c.BackupMinute, }) } if c.BinlogRetentionPeriod != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "BinlogRetentionPeriod", "value": *c.BinlogRetentionPeriod, }) @@ -913,361 +1186,361 @@ func (dc *PostgreSQLConfiguration) ColMap() map[string]string { } } -func (dc *PostgreSQLConfiguration) KV() []map[string]interface{} { +func (dc *PostgreSQLConfiguration) KV() []map[string]any { c := dc.PostgreSQLConfig - o := []map[string]interface{}{} + o := []map[string]any{} if c.AutovacuumFreezeMaxAge != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "AutovacuumFreezeMaxAge", "value": *c.AutovacuumFreezeMaxAge, }) } if c.AutovacuumMaxWorkers != nil { - o = append(o, 
map[string]interface{}{ + o = append(o, map[string]any{ "key": "AutovacuumMaxWorkers", "value": *c.AutovacuumMaxWorkers, }) } if c.AutovacuumNaptime != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "AutovacuumNaptime", "value": *c.AutovacuumNaptime, }) } if c.AutovacuumVacuumThreshold != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "AutovacuumVacuumThreshold", "value": *c.AutovacuumVacuumThreshold, }) } if c.AutovacuumAnalyzeThreshold != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "AutovacuumAnalyzeThreshold", "value": *c.AutovacuumAnalyzeThreshold, }) } if c.AutovacuumVacuumScaleFactor != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "AutovacuumVacuumScaleFactor", "value": *c.AutovacuumVacuumScaleFactor, }) } if c.AutovacuumAnalyzeScaleFactor != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "AutovacuumAnalyzeScaleFactor", "value": *c.AutovacuumAnalyzeScaleFactor, }) } if c.AutovacuumVacuumCostDelay != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "AutovacuumVacuumCostDelay", "value": *c.AutovacuumVacuumCostDelay, }) } if c.AutovacuumVacuumCostLimit != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "AutovacuumVacuumCostLimit", "value": *c.AutovacuumVacuumCostLimit, }) } if c.BGWriterDelay != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "BGWriterDelay", "value": *c.BGWriterDelay, }) } if c.BGWriterFlushAfter != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "BGWriterFlushAfter", "value": *c.BGWriterFlushAfter, }) } if c.BGWriterLRUMaxpages != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "BGWriterLRUMaxpages", "value": *c.BGWriterLRUMaxpages, }) } if c.BGWriterLRUMultiplier != nil 
{ - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "BGWriterLRUMultiplier", "value": *c.BGWriterLRUMultiplier, }) } if c.DeadlockTimeoutMillis != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "DeadlockTimeoutMillis", "value": *c.DeadlockTimeoutMillis, }) } if c.DefaultToastCompression != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "DefaultToastCompression", "value": *c.DefaultToastCompression, }) } if c.IdleInTransactionSessionTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "IdleInTransactionSessionTimeout", "value": *c.IdleInTransactionSessionTimeout, }) } if c.JIT != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "JIT", "value": *c.JIT, }) } if c.LogAutovacuumMinDuration != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "LogAutovacuumMinDuration", "value": *c.LogAutovacuumMinDuration, }) } if c.LogErrorVerbosity != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "LogErrorVerbosity", "value": *c.LogErrorVerbosity, }) } if c.LogLinePrefix != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "LogLinePrefix", "value": *c.LogLinePrefix, }) } if c.LogMinDurationStatement != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "LogMinDurationStatement", "value": *c.LogMinDurationStatement, }) } if c.MaxFilesPerProcess != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxFilesPerProcess", "value": *c.MaxFilesPerProcess, }) } if c.MaxPreparedTransactions != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxPreparedTransactions", "value": *c.MaxPreparedTransactions, }) } if c.MaxPredLocksPerTransaction != nil { - o = append(o, map[string]interface{}{ + o = 
append(o, map[string]any{ "key": "MaxPredLocksPerTransaction", "value": *c.MaxPredLocksPerTransaction, }) } if c.MaxLocksPerTransaction != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxLocksPerTransaction", "value": *c.MaxLocksPerTransaction, }) } if c.MaxStackDepth != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxStackDepth", "value": *c.MaxStackDepth, }) } if c.MaxStandbyArchiveDelay != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxStandbyArchiveDelay", "value": *c.MaxStandbyArchiveDelay, }) } if c.MaxStandbyStreamingDelay != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxStandbyStreamingDelay", "value": *c.MaxStandbyStreamingDelay, }) } if c.MaxReplicationSlots != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxReplicationSlots", "value": *c.MaxReplicationSlots, }) } if c.MaxLogicalReplicationWorkers != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxLogicalReplicationWorkers", "value": *c.MaxLogicalReplicationWorkers, }) } if c.MaxParallelWorkers != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxParallelWorkers", "value": *c.MaxParallelWorkers, }) } if c.MaxParallelWorkersPerGather != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxParallelWorkersPerGather", "value": *c.MaxParallelWorkersPerGather, }) } if c.MaxWorkerProcesses != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxWorkerProcesses", "value": *c.MaxWorkerProcesses, }) } if c.PGPartmanBGWRole != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PGPartmanBGWRole", "value": *c.PGPartmanBGWRole, }) } if c.PGPartmanBGWInterval != nil { - o = append(o, map[string]interface{}{ + o = append(o, 
map[string]any{ "key": "PGPartmanBGWInterval", "value": *c.PGPartmanBGWInterval, }) } if c.PGStatStatementsTrack != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PGStatStatementsTrack", "value": *c.PGStatStatementsTrack, }) } if c.TempFileLimit != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "TempFileLimit", "value": *c.TempFileLimit, }) } if c.Timezone != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "Timezone", "value": *c.Timezone, }) } if c.TrackActivityQuerySize != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "TrackActivityQuerySize", "value": *c.TrackActivityQuerySize, }) } if c.TrackCommitTimestamp != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "TrackCommitTimestamp", "value": *c.TrackCommitTimestamp, }) } if c.TrackFunctions != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "TrackFunctions", "value": *c.TrackFunctions, }) } if c.TrackIOTiming != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "TrackIOTiming", "value": *c.TrackIOTiming, }) } if c.MaxWalSenders != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "MaxWalSenders", "value": *c.MaxWalSenders, }) } if c.WalSenderTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "WalSenderTimeout", "value": *c.WalSenderTimeout, }) } if c.WalWriterDelay != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "WalWriterDelay", "value": *c.WalWriterDelay, }) } if c.SharedBuffersPercentage != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "SharedBuffersPercentage", "value": *c.SharedBuffersPercentage, }) } if c.PgBouncer != nil { if c.PgBouncer.ServerResetQueryAlways != nil { - o = append(o, 
map[string]interface{}{ + o = append(o, map[string]any{ "key": "PgBouncer.ServerResetQueryAlways", "value": *c.PgBouncer.ServerResetQueryAlways, }) } if c.PgBouncer.IgnoreStartupParameters != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PgBouncer.IgnoreStartupParameters", "value": strings.Join(*c.PgBouncer.IgnoreStartupParameters, ","), }) } if c.PgBouncer.MinPoolSize != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PgBouncer.MinPoolSize", "value": *c.PgBouncer.MinPoolSize, }) } if c.PgBouncer.ServerLifetime != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PgBouncer.ServerLifetime", "value": *c.PgBouncer.ServerLifetime, }) } if c.PgBouncer.ServerIdleTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PgBouncer.ServerIdleTimeout", "value": *c.PgBouncer.ServerIdleTimeout, }) } if c.PgBouncer.AutodbPoolSize != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PgBouncer.AutodbPoolSize", "value": *c.PgBouncer.AutodbPoolSize, }) } if c.PgBouncer.AutodbPoolMode != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PgBouncer.AutodbPoolMode", "value": *c.PgBouncer.AutodbPoolMode, }) } if c.PgBouncer.AutodbMaxDbConnections != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PgBouncer.AutodbMaxDbConnections", "value": *c.PgBouncer.AutodbMaxDbConnections, }) } if c.PgBouncer.AutodbIdleTimeout != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "PgBouncer.AutodbIdleTimeout", "value": *c.PgBouncer.AutodbIdleTimeout, }) } } if c.BackupHour != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "BackupHour", "value": *c.BackupHour, }) } if c.BackupMinute != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": 
"BackupMinute", "value": *c.BackupMinute, }) } if c.WorkMem != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "WorkMem", "value": *c.WorkMem, }) } if c.TimeScaleDB != nil && c.TimeScaleDB.MaxBackgroundWorkers != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "TimeScaleDB.MaxBackgroundWorkers", "value": *c.TimeScaleDB.MaxBackgroundWorkers, }) @@ -1299,71 +1572,71 @@ func (dc *RedisConfiguration) ColMap() map[string]string { } } -func (dc *RedisConfiguration) KV() []map[string]interface{} { +func (dc *RedisConfiguration) KV() []map[string]any { c := dc.RedisConfig - o := []map[string]interface{}{} + o := []map[string]any{} if c.RedisMaxmemoryPolicy != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisMaxmemoryPolicy", "value": *c.RedisMaxmemoryPolicy, }) } if c.RedisPubsubClientOutputBufferLimit != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisPubsubClientOutputBufferLimit", "value": *c.RedisPubsubClientOutputBufferLimit, }) } if c.RedisNumberOfDatabases != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisNumberOfDatabases", "value": *c.RedisNumberOfDatabases, }) } if c.RedisIOThreads != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisIOThreads", "value": *c.RedisIOThreads, }) } if c.RedisLFULogFactor != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisLFULogFactor", "value": *c.RedisLFULogFactor, }) } if c.RedisLFUDecayTime != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisLFUDecayTime", "value": *c.RedisLFUDecayTime, }) } if c.RedisSSL != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisSSL", "value": *c.RedisSSL, }) } if c.RedisTimeout != nil { - o = append(o, map[string]interface{}{ + o = 
append(o, map[string]any{ "key": "RedisTimeout", "value": *c.RedisTimeout, }) } if c.RedisNotifyKeyspaceEvents != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisNotifyKeyspaceEvents", "value": *c.RedisNotifyKeyspaceEvents, }) } if c.RedisPersistence != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisPersistence", "value": *c.RedisPersistence, }) } if c.RedisACLChannelsDefault != nil { - o = append(o, map[string]interface{}{ + o = append(o, map[string]any{ "key": "RedisACLChannelsDefault", "value": *c.RedisACLChannelsDefault, }) diff --git a/commands/displayers/domain.go b/commands/displayers/domain.go index 376ba1148..51f6879dc 100644 --- a/commands/displayers/domain.go +++ b/commands/displayers/domain.go @@ -39,11 +39,11 @@ func (d *Domain) ColMap() map[string]string { } } -func (d *Domain) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(d.Domains)) +func (d *Domain) KV() []map[string]any { + out := make([]map[string]any, 0, len(d.Domains)) for _, do := range d.Domains { - o := map[string]interface{}{ + o := map[string]any{ "Domain": do.Name, "TTL": do.TTL, } out = append(out, o) @@ -89,11 +89,11 @@ func (dr *DomainRecord) ColMap() map[string]string { return defaultColMap } -func (dr *DomainRecord) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(dr.DomainRecords)) +func (dr *DomainRecord) KV() []map[string]any { + out := make([]map[string]any, 0, len(dr.DomainRecords)) for _, d := range dr.DomainRecords { - o := map[string]interface{}{ + o := map[string]any{ "ID": d.ID, "Type": d.Type, "Name": d.Name, "Data": d.Data, "Priority": d.Priority, "Port": d.Port, "TTL": d.TTL, "Weight": d.Weight, diff --git a/commands/displayers/droplet.go b/commands/displayers/droplet.go index 5fb981264..3cb0d4a5a 100644 --- a/commands/displayers/droplet.go +++ b/commands/displayers/droplet.go @@ -49,8 +49,8 @@ func (d *Droplet) ColMap() 
map[string]string { } } -func (d *Droplet) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(d.Droplets)) +func (d *Droplet) KV() []map[string]any { + out := make([]map[string]any, 0, len(d.Droplets)) for _, d := range d.Droplets { sort.Strings(d.Tags) tags := strings.Join(d.Tags, ",") @@ -60,7 +60,7 @@ func (d *Droplet) KV() []map[string]interface{} { ip6, _ := d.PublicIPv6() features := strings.Join(d.Features, ",") volumes := strings.Join(d.VolumeIDs, ",") - m := map[string]interface{}{ + m := map[string]any{ "ID": d.ID, "Name": d.Name, "PublicIPv4": ip, "PrivateIPv4": privIP, "PublicIPv6": ip6, "Memory": d.Memory, "VCPUs": d.Vcpus, "Disk": d.Disk, "Region": d.Region.Slug, "Image": image, "VPCUUID": d.VPCUUID, "Status": d.Status, diff --git a/commands/displayers/firewall.go b/commands/displayers/firewall.go index 1948d879c..1440cbb7a 100644 --- a/commands/displayers/firewall.go +++ b/commands/displayers/firewall.go @@ -60,12 +60,12 @@ func (f *Firewall) ColMap() map[string]string { } } -func (f *Firewall) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(f.Firewalls)) +func (f *Firewall) KV() []map[string]any { + out := make([]map[string]any, 0, len(f.Firewalls)) for _, fw := range f.Firewalls { irs, ors := firewallRulesPrintHelper(fw) - o := map[string]interface{}{ + o := map[string]any{ "ID": fw.ID, "Name": fw.Name, "Status": fw.Status, diff --git a/commands/displayers/functions.go b/commands/displayers/functions.go index a10c9e8c7..6f8f258ec 100644 --- a/commands/displayers/functions.go +++ b/commands/displayers/functions.go @@ -51,10 +51,10 @@ func (i *Functions) ColMap() map[string]string { } // KV is the displayer KV method specialized for functions list -func (i *Functions) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(i.Info)) +func (i *Functions) KV() []map[string]any { + out := make([]map[string]any, 0, len(i.Info)) for _, ii := range i.Info { - x := 
map[string]interface{}{ + x := map[string]any{ "Update": time.UnixMilli(ii.Updated).Format("01/02 03:04:05"), "Runtime": findRuntime(ii.Annotations), "Version": ii.Version, diff --git a/commands/displayers/image.go b/commands/displayers/image.go index 55b6fd3ec..d1740e1c2 100644 --- a/commands/displayers/image.go +++ b/commands/displayers/image.go @@ -42,8 +42,8 @@ func (gi *Image) ColMap() map[string]string { } } -func (gi *Image) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(gi.Images)) +func (gi *Image) KV() []map[string]any { + out := make([]map[string]any, 0, len(gi.Images)) for _, i := range gi.Images { publicStatus := false @@ -51,7 +51,7 @@ func (gi *Image) KV() []map[string]interface{} { publicStatus = true } - o := map[string]interface{}{ + o := map[string]any{ "ID": i.ID, "Name": i.Name, "Type": i.Type, "Distribution": i.Distribution, "Slug": i.Slug, "Public": publicStatus, "MinDisk": i.MinDiskSize, } diff --git a/commands/displayers/invoice.go b/commands/displayers/invoice.go index 3178f0929..e5742d6e4 100644 --- a/commands/displayers/invoice.go +++ b/commands/displayers/invoice.go @@ -53,10 +53,10 @@ func (i *Invoice) ColMap() map[string]string { } } -func (i *Invoice) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(i.Invoice.Invoice.InvoiceItems)) +func (i *Invoice) KV() []map[string]any { + out := make([]map[string]any, 0, len(i.Invoice.Invoice.InvoiceItems)) for _, ii := range i.Invoice.Invoice.InvoiceItems { - x := map[string]interface{}{ + x := map[string]any{ "ResourceID": ii.ResourceID, "ResourceUUID": ii.ResourceUUID, "Product": ii.Product, diff --git a/commands/displayers/invoice_list.go b/commands/displayers/invoice_list.go index fe901d42e..b1877a46a 100644 --- a/commands/displayers/invoice_list.go +++ b/commands/displayers/invoice_list.go @@ -43,17 +43,17 @@ func (i *InvoiceList) ColMap() map[string]string { } } -func (i *InvoiceList) KV() []map[string]interface{} { +func (i 
*InvoiceList) KV() []map[string]any { invoices := i.InvoiceList.Invoices - out := make([]map[string]interface{}, 0, len(invoices)+1) - x := map[string]interface{}{ + out := make([]map[string]any, 0, len(invoices)+1) + x := map[string]any{ "InvoiceUUID": "preview", "Amount": i.InvoicePreview.Amount, "InvoicePeriod": i.InvoicePreview.InvoicePeriod, } out = append(out, x) for _, ii := range invoices { - x := map[string]interface{}{ + x := map[string]any{ "InvoiceUUID": ii.InvoiceUUID, "Amount": ii.Amount, "InvoicePeriod": ii.InvoicePeriod, diff --git a/commands/displayers/invoice_summary.go b/commands/displayers/invoice_summary.go index 45f9e9eb7..5f246668a 100644 --- a/commands/displayers/invoice_summary.go +++ b/commands/displayers/invoice_summary.go @@ -59,8 +59,8 @@ func (i *InvoiceSummary) ColMap() map[string]string { } } -func (i *InvoiceSummary) KV() []map[string]interface{} { - x := map[string]interface{}{ +func (i *InvoiceSummary) KV() []map[string]any { + x := map[string]any{ "InvoiceUUID": i.InvoiceSummary.InvoiceUUID, "BillingPeriod": i.InvoiceSummary.BillingPeriod, "Amount": i.InvoiceSummary.Amount, @@ -73,5 +73,5 @@ func (i *InvoiceSummary) KV() []map[string]interface{} { "CreditsAndAdjustments": i.InvoiceSummary.CreditsAndAdjustments.Amount, } - return []map[string]interface{}{x} + return []map[string]any{x} } diff --git a/commands/displayers/kernel.go b/commands/displayers/kernel.go index fe4b89d8b..366c8fa5a 100644 --- a/commands/displayers/kernel.go +++ b/commands/displayers/kernel.go @@ -41,11 +41,11 @@ func (ke *Kernel) ColMap() map[string]string { } } -func (ke *Kernel) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(ke.Kernels)) +func (ke *Kernel) KV() []map[string]any { + out := make([]map[string]any, 0, len(ke.Kernels)) for _, k := range ke.Kernels { - o := map[string]interface{}{ + o := map[string]any{ "ID": k.ID, "Name": k.Name, "Version": k.Version, } diff --git a/commands/displayers/key.go 
b/commands/displayers/key.go index b1b36b93f..f8c23669a 100644 --- a/commands/displayers/key.go +++ b/commands/displayers/key.go @@ -42,11 +42,11 @@ func (ke *Key) ColMap() map[string]string { } } -func (ke *Key) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(ke.Keys)) +func (ke *Key) KV() []map[string]any { + out := make([]map[string]any, 0, len(ke.Keys)) for _, k := range ke.Keys { - o := map[string]interface{}{ + o := map[string]any{ "ID": k.ID, "Name": k.Name, "FingerPrint": k.Fingerprint, } @@ -81,11 +81,11 @@ func (ke *KeyGet) ColMap() map[string]string { } } -func (ke *KeyGet) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(ke.Keys)) +func (ke *KeyGet) KV() []map[string]any { + out := make([]map[string]any, 0, len(ke.Keys)) for _, k := range ke.Keys { - o := map[string]interface{}{ + o := map[string]any{ "ID": k.ID, "Name": k.Name, "FingerPrint": k.Fingerprint, "PublicKey": k.PublicKey, } diff --git a/commands/displayers/kubernetes.go b/commands/displayers/kubernetes.go index 6bea133e5..dbc6d958b 100644 --- a/commands/displayers/kubernetes.go +++ b/commands/displayers/kubernetes.go @@ -81,8 +81,8 @@ func (clusters *KubernetesClusters) ColMap() map[string]string { } } -func (clusters *KubernetesClusters) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(clusters.KubernetesClusters)) +func (clusters *KubernetesClusters) KV() []map[string]any { + out := make([]map[string]any, 0, len(clusters.KubernetesClusters)) for _, cluster := range clusters.KubernetesClusters { tags := strings.Join(cluster.Tags, ",") @@ -94,7 +94,7 @@ func (clusters *KubernetesClusters) KV() []map[string]interface{} { cluster.Status = new(godo.KubernetesClusterStatus) } - o := map[string]interface{}{ + o := map[string]any{ "ID": cluster.ID, "Name": cluster.Name, "Region": cluster.RegionSlug, @@ -153,8 +153,8 @@ func (nodePools *KubernetesNodePools) ColMap() map[string]string { } } -func (nodePools 
*KubernetesNodePools) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(nodePools.KubernetesNodePools)) +func (nodePools *KubernetesNodePools) KV() []map[string]any { + out := make([]map[string]any, 0, len(nodePools.KubernetesNodePools)) for _, nodePools := range nodePools.KubernetesNodePools { tags := strings.Join(nodePools.Tags, ",") @@ -163,7 +163,7 @@ func (nodePools *KubernetesNodePools) KV() []map[string]interface{} { nodes = append(nodes, node.Name) } - o := map[string]interface{}{ + o := map[string]any{ "ID": nodePools.ID, "Name": nodePools.Name, "Size": nodePools.Size, @@ -205,12 +205,12 @@ func (versions *KubernetesVersions) ColMap() map[string]string { } } -func (versions *KubernetesVersions) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(versions.KubernetesVersions)) +func (versions *KubernetesVersions) KV() []map[string]any { + out := make([]map[string]any, 0, len(versions.KubernetesVersions)) for _, version := range versions.KubernetesVersions { - o := map[string]interface{}{ + o := map[string]any{ "Slug": version.KubernetesVersion.Slug, "KubernetesVersion": version.KubernetesVersion.KubernetesVersion, "SupportedFeatures": strings.Join(version.KubernetesVersion.SupportedFeatures, ", "), @@ -245,12 +245,12 @@ func (regions *KubernetesRegions) ColMap() map[string]string { } } -func (regions *KubernetesRegions) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(regions.KubernetesRegions)) +func (regions *KubernetesRegions) KV() []map[string]any { + out := make([]map[string]any, 0, len(regions.KubernetesRegions)) for _, region := range regions.KubernetesRegions { - o := map[string]interface{}{ + o := map[string]any{ "Slug": region.KubernetesRegion.Slug, "Name": region.KubernetesRegion.Name, } @@ -284,12 +284,12 @@ func (nodeSizes *KubernetesNodeSizes) ColMap() map[string]string { } } -func (nodeSizes *KubernetesNodeSizes) KV() []map[string]interface{} { - out := 
make([]map[string]interface{}, 0, len(nodeSizes.KubernetesNodeSizes)) +func (nodeSizes *KubernetesNodeSizes) KV() []map[string]any { + out := make([]map[string]any, 0, len(nodeSizes.KubernetesNodeSizes)) for _, size := range nodeSizes.KubernetesNodeSizes { - o := map[string]interface{}{ + o := map[string]any{ "Slug": size.KubernetesNodeSize.Slug, "Name": size.KubernetesNodeSize.Name, } @@ -325,13 +325,13 @@ func (ar *KubernetesAssociatedResources) ColMap() map[string]string { } } -func (ar *KubernetesAssociatedResources) KV() []map[string]interface{} { - o := map[string]interface{}{ +func (ar *KubernetesAssociatedResources) KV() []map[string]any { + o := map[string]any{ "Volumes": flattenAssociatedResourceIDs(ar.KubernetesAssociatedResources.Volumes), "VolumeSnapshots": flattenAssociatedResourceIDs(ar.KubernetesAssociatedResources.VolumeSnapshots), "LoadBalancers": flattenAssociatedResourceIDs(ar.KubernetesAssociatedResources.LoadBalancers), } - return []map[string]interface{}{o} + return []map[string]any{o} } func flattenAssociatedResourceIDs(resources []*godo.AssociatedResource) (out []string) { diff --git a/commands/displayers/load_balancer.go b/commands/displayers/load_balancer.go index 788172e1d..11dd53d17 100644 --- a/commands/displayers/load_balancer.go +++ b/commands/displayers/load_balancer.go @@ -74,8 +74,8 @@ func (lb *LoadBalancer) ColMap() map[string]string { } } -func (lb *LoadBalancer) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(lb.LoadBalancers)) +func (lb *LoadBalancer) KV() []map[string]any { + out := make([]map[string]any, 0, len(lb.LoadBalancers)) for _, l := range lb.LoadBalancers { forwardingRules := make([]string, 0, len(l.ForwardingRules)) @@ -83,7 +83,7 @@ func (lb *LoadBalancer) KV() []map[string]interface{} { forwardingRules = append(forwardingRules, prettyPrintStruct(r)) } - o := map[string]interface{}{ + o := map[string]any{ "ID": l.ID, "IP": l.IP, "Name": l.Name, @@ -118,7 +118,7 @@ func toBool(b 
*bool) bool { return *b } -func prettyPrintStruct(obj interface{}) string { +func prettyPrintStruct(obj any) string { defer func() { if err := recover(); err != nil { fmt.Printf("Recovered from %v", err) diff --git a/commands/displayers/monitoring.go b/commands/displayers/monitoring.go index a390ee900..3243817cf 100644 --- a/commands/displayers/monitoring.go +++ b/commands/displayers/monitoring.go @@ -51,8 +51,8 @@ func (a *AlertPolicy) ColMap() map[string]string { } } -func (a *AlertPolicy) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(a.AlertPolicies)) +func (a *AlertPolicy) KV() []map[string]any { + out := make([]map[string]any, 0, len(a.AlertPolicies)) for _, x := range a.AlertPolicies { emails := "" @@ -67,7 +67,7 @@ func (a *AlertPolicy) KV() []map[string]interface{} { } slacks := strings.Join(slackChannels, ",") - o := map[string]interface{}{ + o := map[string]any{ "UUID": x.UUID, "Type": x.Type, "Description": x.Description, diff --git a/commands/displayers/namespaces.go b/commands/displayers/namespaces.go index a435f3843..7b4a70599 100644 --- a/commands/displayers/namespaces.go +++ b/commands/displayers/namespaces.go @@ -49,10 +49,10 @@ func (i *Namespaces) ColMap() map[string]string { } // KV is the displayer KV method specialized for namespaces list -func (i *Namespaces) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(i.Info)) +func (i *Namespaces) KV() []map[string]any { + out := make([]map[string]any, 0, len(i.Info)) for _, ii := range i.Info { - x := map[string]interface{}{ + x := map[string]any{ "Label": ii.Label, "Region": ii.Region, "ID": ii.Namespace, diff --git a/commands/displayers/output.go b/commands/displayers/output.go index ff0d9848a..ac89bd359 100644 --- a/commands/displayers/output.go +++ b/commands/displayers/output.go @@ -27,7 +27,7 @@ import ( type Displayable interface { Cols() []string ColMap() map[string]string - KV() []map[string]interface{} + KV() []map[string]any 
JSON(io.Writer) error } @@ -89,7 +89,7 @@ func DisplayText(item Displayable, out io.Writer, noHeaders bool, includeCols [] } for _, r := range item.KV() { - values := make([]interface{}, 0, len(cols)) + values := make([]any, 0, len(cols)) formats := make([]string, 0, len(cols)) for _, col := range cols { @@ -117,7 +117,7 @@ func DisplayText(item Displayable, out io.Writer, noHeaders bool, includeCols [] return w.Flush() } -func writeJSON(item interface{}, w io.Writer) error { +func writeJSON(item any, w io.Writer) error { b, err := json.Marshal(item) if err != nil { return err @@ -135,7 +135,7 @@ func writeJSON(item interface{}, w io.Writer) error { // containsOnlyNilSlice returns true if the given interface's concrete type is // a pointer to a struct that contains a single nil slice field. -func containsOnlyNilSlice(i interface{}) bool { +func containsOnlyNilSlice(i any) bool { if reflect.TypeOf(i).Kind() != reflect.Ptr { return false } diff --git a/commands/displayers/plugin.go b/commands/displayers/plugin.go index df522589d..92e2e74d6 100644 --- a/commands/displayers/plugin.go +++ b/commands/displayers/plugin.go @@ -42,11 +42,11 @@ func (p *Plugin) ColMap() map[string]string { } } -func (p *Plugin) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(p.Plugins)) +func (p *Plugin) KV() []map[string]any { + out := make([]map[string]any, 0, len(p.Plugins)) for _, plug := range p.Plugins { - o := map[string]interface{}{ + o := map[string]any{ "Name": plug.Name, } diff --git a/commands/displayers/project.go b/commands/displayers/project.go index 1135ac10f..f1a930c00 100644 --- a/commands/displayers/project.go +++ b/commands/displayers/project.go @@ -59,11 +59,11 @@ func (p *Project) ColMap() map[string]string { } } -func (p *Project) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(p.Projects)) +func (p *Project) KV() []map[string]any { + out := make([]map[string]any, 0, len(p.Projects)) for _, pr := range 
p.Projects { - o := map[string]interface{}{ + o := map[string]any{ "ID": pr.ID, "OwnerUUID": pr.OwnerUUID, "OwnerID": pr.OwnerID, @@ -107,8 +107,8 @@ func (p *ProjectResource) ColMap() map[string]string { } } -func (p *ProjectResource) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(p.ProjectResources)) +func (p *ProjectResource) KV() []map[string]any { + out := make([]map[string]any, 0, len(p.ProjectResources)) for _, pr := range p.ProjectResources { assignedAt := pr.AssignedAt @@ -116,7 +116,7 @@ func (p *ProjectResource) KV() []map[string]interface{} { assignedAt = "N/A" } - o := map[string]interface{}{ + o := map[string]any{ "URN": pr.URN, "AssignedAt": assignedAt, "Status": pr.Status, diff --git a/commands/displayers/rate_limit.go b/commands/displayers/rate_limit.go index 6d11f3f7c..aa3cd3538 100644 --- a/commands/displayers/rate_limit.go +++ b/commands/displayers/rate_limit.go @@ -41,10 +41,10 @@ func (rl *RateLimit) ColMap() map[string]string { } } -func (rl *RateLimit) KV() []map[string]interface{} { - x := map[string]interface{}{ +func (rl *RateLimit) KV() []map[string]any { + x := map[string]any{ "Limit": rl.Limit, "Remaining": rl.Remaining, "Reset": rl.Reset, } - return []map[string]interface{}{x} + return []map[string]any{x} } diff --git a/commands/displayers/region.go b/commands/displayers/region.go index a63fbf095..553a2a52f 100644 --- a/commands/displayers/region.go +++ b/commands/displayers/region.go @@ -41,11 +41,11 @@ func (re *Region) ColMap() map[string]string { } } -func (re *Region) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(re.Regions)) +func (re *Region) KV() []map[string]any { + out := make([]map[string]any, 0, len(re.Regions)) for _, r := range re.Regions { - o := map[string]interface{}{ + o := map[string]any{ "Slug": r.Slug, "Name": r.Name, "Available": r.Available, } diff --git a/commands/displayers/registry.go b/commands/displayers/registry.go index 3e22612c3..868af75d4 
100644 --- a/commands/displayers/registry.go +++ b/commands/displayers/registry.go @@ -47,11 +47,11 @@ func (r *Registry) ColMap() map[string]string { } } -func (r *Registry) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(r.Registries)) +func (r *Registry) KV() []map[string]any { + out := make([]map[string]any, 0, len(r.Registries)) for _, reg := range r.Registries { - m := map[string]interface{}{ + m := map[string]any{ "Name": reg.Name, "Endpoint": reg.Endpoint(), "Region": reg.Region, @@ -91,11 +91,11 @@ func (r *Repository) ColMap() map[string]string { } } -func (r *Repository) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(r.Repositories)) +func (r *Repository) KV() []map[string]any { + out := make([]map[string]any, 0, len(r.Repositories)) for _, reg := range r.Repositories { - m := map[string]interface{}{ + m := map[string]any{ "Name": reg.Name, "LatestTag": reg.LatestTag.Tag, "TagCount": reg.TagCount, @@ -140,8 +140,8 @@ func (r *RepositoryV2) ColMap() map[string]string { } } -func (r *RepositoryV2) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(r.Repositories)) +func (r *RepositoryV2) KV() []map[string]any { + out := make([]map[string]any, 0, len(r.Repositories)) for _, reg := range r.Repositories { var latestManifest string @@ -156,7 +156,7 @@ func (r *RepositoryV2) KV() []map[string]interface{} { latestUpdate = ®.LatestManifest.UpdatedAt } - m := map[string]interface{}{ + m := map[string]any{ "Name": reg.Name, "LatestManifest": latestManifest, "LatestTag": latestTag, @@ -199,11 +199,11 @@ func (r *RepositoryTag) ColMap() map[string]string { } } -func (r *RepositoryTag) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(r.Tags)) +func (r *RepositoryTag) KV() []map[string]any { + out := make([]map[string]any, 0, len(r.Tags)) for _, tag := range r.Tags { - m := map[string]interface{}{ + m := map[string]any{ "Tag": tag.Tag, 
"CompressedSizeBytes": BytesToHumanReadableUnit(tag.CompressedSizeBytes), "UpdatedAt": tag.UpdatedAt, @@ -246,11 +246,11 @@ func (r *RepositoryManifest) ColMap() map[string]string { } } -func (r *RepositoryManifest) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(r.Manifests)) +func (r *RepositoryManifest) KV() []map[string]any { + out := make([]map[string]any, 0, len(r.Manifests)) for _, manifest := range r.Manifests { - m := map[string]interface{}{ + m := map[string]any{ "Digest": manifest.Digest, "CompressedSizeBytes": BytesToHumanReadableUnit(manifest.CompressedSizeBytes), "SizeBytes": BytesToHumanReadableUnit(manifest.SizeBytes), @@ -298,11 +298,11 @@ func (g *GarbageCollection) ColMap() map[string]string { } } -func (g *GarbageCollection) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(g.GarbageCollections)) +func (g *GarbageCollection) KV() []map[string]any { + out := make([]map[string]any, 0, len(g.GarbageCollections)) for _, gc := range g.GarbageCollections { - out = append(out, map[string]interface{}{ + out = append(out, map[string]any{ "UUID": gc.UUID, "RegistryName": gc.RegistryName, "Status": gc.Status, @@ -352,11 +352,11 @@ func (t *RegistrySubscriptionTiers) ColMap() map[string]string { } } -func (t *RegistrySubscriptionTiers) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(t.SubscriptionTiers)) +func (t *RegistrySubscriptionTiers) KV() []map[string]any { + out := make([]map[string]any, 0, len(t.SubscriptionTiers)) for _, tier := range t.SubscriptionTiers { - out = append(out, map[string]interface{}{ + out = append(out, map[string]any{ "Name": tier.Name, "Slug": tier.Slug, "IncludedRepositories": tier.IncludedRepositories, @@ -392,11 +392,11 @@ func (t *RegistryAvailableRegions) ColMap() map[string]string { } } -func (t *RegistryAvailableRegions) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(t.Regions)) +func (t 
*RegistryAvailableRegions) KV() []map[string]any { + out := make([]map[string]any, 0, len(t.Regions)) for _, region := range t.Regions { - out = append(out, map[string]interface{}{ + out = append(out, map[string]any{ "Slug": region, }) } diff --git a/commands/displayers/reserved_ip.go b/commands/displayers/reserved_ip.go index 1df088dca..c19c116eb 100644 --- a/commands/displayers/reserved_ip.go +++ b/commands/displayers/reserved_ip.go @@ -42,8 +42,8 @@ func (rip *ReservedIP) ColMap() map[string]string { } } -func (rip *ReservedIP) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(rip.ReservedIPs)) +func (rip *ReservedIP) KV() []map[string]any { + out := make([]map[string]any, 0, len(rip.ReservedIPs)) for _, f := range rip.ReservedIPs { var dropletID, dropletName string @@ -52,7 +52,7 @@ func (rip *ReservedIP) KV() []map[string]interface{} { dropletName = f.Droplet.Name } - o := map[string]interface{}{ + o := map[string]any{ "IP": f.IP, "Region": f.Region.Slug, "DropletID": dropletID, "DropletName": dropletName, "ProjectID": f.ProjectID, diff --git a/commands/displayers/size.go b/commands/displayers/size.go index 8f51659eb..7f3db26a4 100644 --- a/commands/displayers/size.go +++ b/commands/displayers/size.go @@ -45,11 +45,11 @@ func (si *Size) ColMap() map[string]string { } } -func (si *Size) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(si.Sizes)) +func (si *Size) KV() []map[string]any { + out := make([]map[string]any, 0, len(si.Sizes)) for _, s := range si.Sizes { - o := map[string]interface{}{ + o := map[string]any{ "Slug": s.Slug, "Description": s.Description, "Memory": s.Memory, "VCPUs": s.Vcpus, "Disk": s.Disk, "PriceMonthly": fmt.Sprintf("%0.2f", s.PriceMonthly), diff --git a/commands/displayers/snapshot.go b/commands/displayers/snapshot.go index a445997a3..9ef9f5e7a 100644 --- a/commands/displayers/snapshot.go +++ b/commands/displayers/snapshot.go @@ -42,11 +42,11 @@ func (s *Snapshot) ColMap() 
map[string]string { "ResourceId": "Resource ID", "ResourceType": "Resource Type", "MinDiskSize": "Min Disk Size", "Size": "Size", "Tags": "Tags"} } -func (s *Snapshot) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(s.Snapshots)) +func (s *Snapshot) KV() []map[string]any { + out := make([]map[string]any, 0, len(s.Snapshots)) for _, ss := range s.Snapshots { - o := map[string]interface{}{ + o := map[string]any{ "ID": ss.ID, "Name": ss.Name, "ResourceId": ss.ResourceID, "ResourceType": ss.ResourceType, "Regions": ss.Regions, "MinDiskSize": ss.MinDiskSize, "Size": strconv.FormatFloat(ss.SizeGigaBytes, 'f', 2, 64) + " GiB", "CreatedAt": ss.Created, diff --git a/commands/displayers/tag.go b/commands/displayers/tag.go index 6dfc6d24b..60839a69e 100644 --- a/commands/displayers/tag.go +++ b/commands/displayers/tag.go @@ -40,12 +40,12 @@ func (t *Tag) ColMap() map[string]string { } } -func (t *Tag) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(t.Tags)) +func (t *Tag) KV() []map[string]any { + out := make([]map[string]any, 0, len(t.Tags)) for _, x := range t.Tags { dropletCount := x.Resources.Droplets.Count - o := map[string]interface{}{ + o := map[string]any{ "Name": x.Name, "DropletCount": dropletCount, } diff --git a/commands/displayers/triggers.go b/commands/displayers/triggers.go index 69a116d2c..2079d7214 100644 --- a/commands/displayers/triggers.go +++ b/commands/displayers/triggers.go @@ -48,15 +48,15 @@ func (i *Triggers) ColMap() map[string]string { } // KV is the displayer KV method specialized for triggers list -func (i *Triggers) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(i.List)) +func (i *Triggers) KV() []map[string]any { + out := make([]map[string]any, 0, len(i.List)) for _, ii := range i.List { lastRun := "_" if ii.ScheduledRuns != nil && ii.ScheduledRuns.LastRunAt != nil && !ii.ScheduledRuns.LastRunAt.IsZero() { lastRun = ii.ScheduledRuns.LastRunAt.String() 
} - x := map[string]interface{}{ + x := map[string]any{ "Name": ii.Name, "Cron": ii.ScheduledDetails.Cron, "Function": ii.Function, diff --git a/commands/displayers/uptime_alert.go b/commands/displayers/uptime_alert.go index c63ee9f22..2c7b0ac64 100644 --- a/commands/displayers/uptime_alert.go +++ b/commands/displayers/uptime_alert.go @@ -47,8 +47,8 @@ func (ua *UptimeAlert) ColMap() map[string]string { } } -func (ua *UptimeAlert) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(ua.UptimeAlerts)) +func (ua *UptimeAlert) KV() []map[string]any { + out := make([]map[string]any, 0, len(ua.UptimeAlerts)) for _, uptimeAlert := range ua.UptimeAlerts { emails := "" if uptimeAlert.Notifications.Email != nil { @@ -62,7 +62,7 @@ func (ua *UptimeAlert) KV() []map[string]interface{} { } slacks := strings.Join(slackChannels, ",") - m := map[string]interface{}{ + m := map[string]any{ "ID": uptimeAlert.ID, "Name": uptimeAlert.Name, "Type": uptimeAlert.Type, diff --git a/commands/displayers/uptime_check.go b/commands/displayers/uptime_check.go index 9e43019b0..d14cc2d2e 100644 --- a/commands/displayers/uptime_check.go +++ b/commands/displayers/uptime_check.go @@ -48,10 +48,10 @@ func (uc *UptimeCheck) ColMap() map[string]string { } } -func (uc *UptimeCheck) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(uc.UptimeChecks)) +func (uc *UptimeCheck) KV() []map[string]any { + out := make([]map[string]any, 0, len(uc.UptimeChecks)) for _, uptimeCheck := range uc.UptimeChecks { - m := map[string]interface{}{ + m := map[string]any{ "ID": uptimeCheck.ID, "Name": uptimeCheck.Name, "Type": uptimeCheck.Type, diff --git a/commands/displayers/volume.go b/commands/displayers/volume.go index cf46ee05c..d43983ed3 100644 --- a/commands/displayers/volume.go +++ b/commands/displayers/volume.go @@ -53,10 +53,10 @@ func (a *Volume) ColMap() map[string]string { } -func (a *Volume) KV() []map[string]interface{} { - out := 
make([]map[string]interface{}, 0, len(a.Volumes)) +func (a *Volume) KV() []map[string]any { + out := make([]map[string]any, 0, len(a.Volumes)) for _, volume := range a.Volumes { - m := map[string]interface{}{ + m := map[string]any{ "ID": volume.ID, "Name": volume.Name, "Size": strconv.FormatInt(volume.SizeGigaBytes, 10) + " GiB", diff --git a/commands/displayers/vpc.go b/commands/displayers/vpc.go index 260615249..712d15c10 100644 --- a/commands/displayers/vpc.go +++ b/commands/displayers/vpc.go @@ -55,11 +55,11 @@ func (v *VPC) ColMap() map[string]string { } } -func (v *VPC) KV() []map[string]interface{} { - out := make([]map[string]interface{}, 0, len(v.VPCs)) +func (v *VPC) KV() []map[string]any { + out := make([]map[string]any, 0, len(v.VPCs)) for _, v := range v.VPCs { - o := map[string]interface{}{ + o := map[string]any{ "ID": v.ID, "URN": v.URN, "Name": v.Name, diff --git a/commands/errors.go b/commands/errors.go index 325c504ff..a6070b0f6 100644 --- a/commands/errors.go +++ b/commands/errors.go @@ -97,13 +97,13 @@ func ensureOneArg(c *CmdConfig) error { } } -func warn(msg string, args ...interface{}) { +func warn(msg string, args ...any) { fmt.Fprintf(color.Output, "%s: %s\n", colorWarn, fmt.Sprintf(msg, args...)) } -func warnConfirm(msg string, args ...interface{}) { +func warnConfirm(msg string, args ...any) { fmt.Fprintf(color.Output, "%s: %s", colorWarn, fmt.Sprintf(msg, args...)) } -func notice(msg string, args ...interface{}) { +func notice(msg string, args ...any) { fmt.Fprintf(color.Output, "%s: %s\n", colorNotice, fmt.Sprintf(msg, args...)) } diff --git a/commands/firewalls.go b/commands/firewalls.go index 41d4d08b9..25f7ef44e 100644 --- a/commands/firewalls.go +++ b/commands/firewalls.go @@ -448,8 +448,8 @@ func extractOutboundRules(s string) (rules []godo.OutboundRule, err error) { return rules, nil } -func extractRule(ruleStr string, sd string) (map[string]interface{}, error) { - rule := map[string]interface{}{} +func extractRule(ruleStr string, 
sd string) (map[string]any, error) { + rule := map[string]any{} var dropletIDs []int var addresses, lbUIDs, k8sIDs, tags []string @@ -480,7 +480,7 @@ func extractRule(ruleStr string, sd string) (map[string]interface{}, error) { } } - rule[sd] = map[string]interface{}{ + rule[sd] = map[string]any{ "addresses": addresses, "droplet_ids": dropletIDs, "load_balancer_uids": lbUIDs, diff --git a/commands/functions.go b/commands/functions.go index c36be5af8..b031f2843 100644 --- a/commands/functions.go +++ b/commands/functions.go @@ -240,9 +240,9 @@ func RunFunctionsInvoke(c *CmdConfig) error { } web, _ := c.Doit.GetBool(c.NS, flagWeb) if web { - var mapParams map[string]interface{} = nil + var mapParams map[string]any = nil if params != nil { - p, ok := params.(map[string]interface{}) + p, ok := params.(map[string]any) if !ok { return fmt.Errorf("cannot invoke via web: parameters do not form a dictionary") } @@ -258,7 +258,7 @@ func RunFunctionsInvoke(c *CmdConfig) error { if err != nil { if response != nil { - activationResponse := response.(map[string]interface{}) + activationResponse := response.(map[string]any) template.Print(`Request accepted, but processing not completed yet. {{nl}}All functions invocation >= 30s will get demoted to an asynchronous invocation. Use {{highlight "--no-wait"}} flag to immediately return the activation id. {{nl}} Use this command to view the results. {{bold "doctl sls activations result" }} {{bold .}} {{nl 2}}`, activationResponse["activationId"]) @@ -323,8 +323,8 @@ func sortFunctionList(list []whisk.Action) { // consolidateParams accepts parameters from a file, the command line, or both, and consolidates all // such parameters into a simple dictionary. 
-func consolidateParams(paramFile string, params []string) (interface{}, error) { - consolidated := map[string]interface{}{} +func consolidateParams(paramFile string, params []string) (any, error) { + consolidated := map[string]any{} if len(paramFile) > 0 { contents, err := os.ReadFile(paramFile) if err != nil { diff --git a/commands/functions_test.go b/commands/functions_test.go index d989ffacb..f91cf645e 100644 --- a/commands/functions_test.go +++ b/commands/functions_test.go @@ -173,9 +173,9 @@ func TestFunctionsInvoke(t *testing.T) { tests := []struct { name string doctlArgs string - doctlFlags map[string]interface{} + doctlFlags map[string]any requestResult bool - passedParams interface{} + passedParams any }{ { name: "no flags", @@ -186,34 +186,34 @@ func TestFunctionsInvoke(t *testing.T) { { name: "full flag", doctlArgs: "hello", - doctlFlags: map[string]interface{}{"full": ""}, + doctlFlags: map[string]any{"full": ""}, requestResult: false, passedParams: nil, }, { name: "param flag", doctlArgs: "hello", - doctlFlags: map[string]interface{}{"param": "name:world"}, + doctlFlags: map[string]any{"param": "name:world"}, requestResult: true, - passedParams: map[string]interface{}{"name": "world"}, + passedParams: map[string]any{"name": "world"}, }, { name: "param flag list", doctlArgs: "hello", - doctlFlags: map[string]interface{}{"param": []string{"name:world", "address:everywhere"}}, + doctlFlags: map[string]any{"param": []string{"name:world", "address:everywhere"}}, requestResult: true, - passedParams: map[string]interface{}{"name": "world", "address": "everywhere"}, + passedParams: map[string]any{"name": "world", "address": "everywhere"}, }, { name: "param flag colon-value", doctlArgs: "hello", - doctlFlags: map[string]interface{}{"param": []string{"url:https://example.com"}}, + doctlFlags: map[string]any{"param": []string{"url:https://example.com"}}, requestResult: true, - passedParams: map[string]interface{}{"url": "https://example.com"}, + passedParams: 
map[string]any{"url": "https://example.com"}, }, } - expectedRemoteResult := map[string]interface{}{ + expectedRemoteResult := map[string]any{ "body": "Hello world!", } diff --git a/commands/load_balancers.go b/commands/load_balancers.go index d9bdec8dc..304940303 100644 --- a/commands/load_balancers.go +++ b/commands/load_balancers.go @@ -378,7 +378,7 @@ func extractForwardingRules(s string) (forwardingRules []godo.ForwardingRule, er return forwardingRules, err } -func fillStructFromStringSliceArgs(obj interface{}, s string) error { +func fillStructFromStringSliceArgs(obj any, s string) error { if len(s) == 0 { return nil } diff --git a/commands/namespaces_test.go b/commands/namespaces_test.go index 83a0c0620..22737e15d 100644 --- a/commands/namespaces_test.go +++ b/commands/namespaces_test.go @@ -40,7 +40,7 @@ func TestNamespacesCommand(t *testing.T) { func TestNamespacesCreate(t *testing.T) { tests := []struct { name string - doctlFlags map[string]interface{} + doctlFlags map[string]any expectedOutput string expectedError error expectList bool @@ -52,7 +52,7 @@ func TestNamespacesCreate(t *testing.T) { }, { name: "invalid region", - doctlFlags: map[string]interface{}{ + doctlFlags: map[string]any{ "label": "my_dog", "region": "dog", }, @@ -60,7 +60,7 @@ func TestNamespacesCreate(t *testing.T) { }, { name: "legal flags, with no-connect", - doctlFlags: map[string]interface{}{ + doctlFlags: map[string]any{ "label": "something", "region": "lon", "no-connect": true, @@ -70,7 +70,7 @@ func TestNamespacesCreate(t *testing.T) { }, { name: "legal flags, with label conflict", - doctlFlags: map[string]interface{}{ + doctlFlags: map[string]any{ "label": "my_dog", "region": "lon", }, @@ -79,7 +79,7 @@ func TestNamespacesCreate(t *testing.T) { }, { name: "legal flags, should connect", - doctlFlags: map[string]interface{}{ + doctlFlags: map[string]any{ "label": "something", "region": "lon", }, @@ -184,7 +184,7 @@ func TestNamespacesList(t *testing.T) { func 
TestNamespacesDelete(t *testing.T) { tests := []struct { name string - doctlFlags map[string]interface{} + doctlFlags map[string]any doctlArg string expectedOutput string expectedError error @@ -203,7 +203,7 @@ func TestNamespacesDelete(t *testing.T) { { name: "valid argument with force", doctlArg: "my_dog", - doctlFlags: map[string]interface{}{"force": true}, + doctlFlags: map[string]any{"force": true}, }, { name: "valid argument with prompt", diff --git a/commands/serverless_test.go b/commands/serverless_test.go index 168841248..c96433828 100644 --- a/commands/serverless_test.go +++ b/commands/serverless_test.go @@ -359,7 +359,7 @@ func TestServerlessInit(t *testing.T) { name string doctlArgs string doctlFlags map[string]string - out map[string]interface{} + out map[string]any expectCheck bool expectOverwrite bool }{ @@ -368,21 +368,21 @@ func TestServerlessInit(t *testing.T) { doctlArgs: "path/to/foo", // The language flag has a default normally applied by cobra doctlFlags: map[string]string{"language": "javascript"}, - out: map[string]interface{}{"project": "foo"}, + out: map[string]any{"project": "foo"}, }, { name: "overwrite", doctlArgs: "path/to/foo", // The language flag has a default normally applied by cobra doctlFlags: map[string]string{"overwrite": "", "language": "javascript"}, - out: map[string]interface{}{"project": "foo"}, + out: map[string]any{"project": "foo"}, expectOverwrite: true, }, { name: "language flag", doctlArgs: "path/to/foo", doctlFlags: map[string]string{"language": "go"}, - out: map[string]interface{}{"project": "foo"}, + out: map[string]any{"project": "foo"}, expectCheck: true, }, } diff --git a/commands/serverless_util.go b/commands/serverless_util.go index 716df037d..4475f4249 100644 --- a/commands/serverless_util.go +++ b/commands/serverless_util.go @@ -129,7 +129,7 @@ func isServerlessConnected(leafCredsDir string, serverlessDir string) bool { // Converts something "object-like" but untyped to generic JSON // Designed for human 
eyes; does not provide an explicit error // result -func genericJSON(toFormat interface{}) string { +func genericJSON(toFormat any) string { bytes, err := json.MarshalIndent(&toFormat, "", " ") if err != nil { return "" diff --git a/commands/triggers_test.go b/commands/triggers_test.go index 3989a3284..bf0f345ae 100644 --- a/commands/triggers_test.go +++ b/commands/triggers_test.go @@ -54,7 +54,7 @@ func TestTriggersGet(t *testing.T) { UpdatedAt: time.Date(2022, 10, 17, 18, 41, 30, 0, time.UTC), ScheduledDetails: &do.TriggerScheduledDetails{ Cron: "5 * * * *", - Body: map[string]interface{}{ + Body: map[string]any{ "foo": "bar", }, }, @@ -85,7 +85,7 @@ func TestTriggersList(t *testing.T) { Type: "SCHEDULED", ScheduledDetails: &do.TriggerScheduledDetails{ Cron: "* * * * *", - Body: map[string]interface{}{}, + Body: map[string]any{}, }, IsEnabled: true, CreatedAt: time.Date(2022, 10, 5, 13, 46, 59, 0, time.UTC), @@ -115,14 +115,14 @@ func TestTriggersList(t *testing.T) { } tests := []struct { name string - doctlFlags map[string]interface{} + doctlFlags map[string]any expectedOutput string listArg string listResult []do.ServerlessTrigger }{ { name: "simple list", - doctlFlags: map[string]interface{}{ + doctlFlags: map[string]any{ "no-header": "", }, listResult: theList, @@ -133,7 +133,7 @@ firePoll2 10 * * * * misc/pollStatus false _ }, { name: "filtered list", - doctlFlags: map[string]interface{}{ + doctlFlags: map[string]any{ "function": "misc/pollStatus", "no-header": "", }, diff --git a/do/1_clicks.go b/do/1_clicks.go index 41b9e944c..45e1afdc9 100644 --- a/do/1_clicks.go +++ b/do/1_clicks.go @@ -49,13 +49,13 @@ func NewOneClickService(client *godo.Client) OneClickService { } func (ocs *oneClickService) List(oneClickType string) (OneClicks, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ocs.Client.OneClick.List(context.TODO(), oneClickType) 
if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/account_test.go b/do/account_test.go index 3b3a3cc4b..26f30bc54 100644 --- a/do/account_test.go +++ b/do/account_test.go @@ -58,7 +58,7 @@ func (m *MockAccountService) Get(arg0 context.Context) (*godo.Account, *godo.Res } // Get indicates an expected call of Get -func (mr *MockAccountServiceMockRecorder) Get(arg0 interface{}) *gomock.Call { +func (mr *MockAccountServiceMockRecorder) Get(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockAccountService)(nil).Get), arg0) } diff --git a/do/actions.go b/do/actions.go index 468fe3f68..4402ca7a5 100644 --- a/do/actions.go +++ b/do/actions.go @@ -47,13 +47,13 @@ func NewActionsService(godoClient *godo.Client) ActionsService { } func (as *actionsService) List() (Actions, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := as.client.Actions.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/apps.go b/do/apps.go index 5c050757b..d236d6cb0 100644 --- a/do/apps.go +++ b/do/apps.go @@ -81,14 +81,14 @@ func (s *appsService) Get(appID string) (*godo.App, error) { } func (s *appsService) List(withProjects bool) ([]*godo.App, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { opt.WithProjects = withProjects list, resp, err := s.client.Apps.List(s.ctx, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, 0, len(list)) + si := make([]any, 0, len(list)) for _, item := range list { si = append(si, item) } @@ -150,13 
+150,13 @@ func (s *appsService) GetDeployment(appID, deploymentID string) (*godo.Deploymen } func (s *appsService) ListDeployments(appID string) ([]*godo.Deployment, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := s.client.Apps.ListDeployments(s.ctx, appID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, 0, len(list)) + si := make([]any, 0, len(list)) for _, item := range list { si = append(si, item) } diff --git a/do/balance_test.go b/do/balance_test.go index 88ffaf0d9..e0f742c2c 100644 --- a/do/balance_test.go +++ b/do/balance_test.go @@ -58,7 +58,7 @@ func (m *MockBalanceService) Get(arg0 context.Context) (*godo.Balance, *godo.Res } // Get indicates an expected call of Get -func (mr *MockBalanceServiceMockRecorder) Get(arg0 interface{}) *gomock.Call { +func (mr *MockBalanceServiceMockRecorder) Get(arg0 any) *gomock.Call { mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockBalanceService)(nil).Get), arg0) } diff --git a/do/billing_history.go b/do/billing_history.go index f73150b04..0634bb0d1 100644 --- a/do/billing_history.go +++ b/do/billing_history.go @@ -43,13 +43,13 @@ func NewBillingHistoryService(client *godo.Client) BillingHistoryService { } func (is *billingHistoryService) List() (*BillingHistory, error) { - listFn := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + listFn := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { historyList, resp, err := is.client.BillingHistory.List(context.Background(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(historyList.BillingHistory)) + si := make([]any, len(historyList.BillingHistory)) for i := range historyList.BillingHistory { si[i] = historyList.BillingHistory[i] } diff --git a/do/cdns.go b/do/cdns.go index 9f19a3251..db7db83b7 100644 --- 
a/do/cdns.go +++ b/do/cdns.go @@ -34,13 +34,13 @@ func NewCDNsService(godoClient *godo.Client) CDNsService { } func (c *cdnsService) List() ([]CDN, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := c.client.CDNs.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/certificates.go b/do/certificates.go index dc499b9e5..b381a2a0c 100644 --- a/do/certificates.go +++ b/do/certificates.go @@ -67,13 +67,13 @@ func (cs *certificatesService) Create(cr *godo.CertificateRequest) (*Certificate } func (cs *certificatesService) List() (Certificates, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := cs.client.Certificates.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/databases.go b/do/databases.go index 9c5016e75..75c09ccd6 100644 --- a/do/databases.go +++ b/do/databases.go @@ -15,6 +15,7 @@ package do import ( "context" + "encoding/json" "strings" "github.com/digitalocean/godo" @@ -111,6 +112,19 @@ type RedisConfig struct { *godo.RedisConfig } +// DatabaseTopics is a slice of DatabaseTopic +type DatabaseTopics []DatabaseTopic + +// DatabaseTopic is a wrapper for godo.DatabaseTopic +type DatabaseTopic struct { + *godo.DatabaseTopic +} + +// DatabaseTopicPartitions is a slice of *godo.TopicPartition +type DatabaseTopicPartitions struct { + Partitions []*godo.TopicPartition +} + // DatabasesService is an interface for interacting with DigitalOcean's Database API type DatabasesService interface { List() (Databases, error) @@ -159,6 +173,16 @@ type DatabasesService 
interface { GetMySQLConfiguration(databaseID string) (*MySQLConfig, error) GetPostgreSQLConfiguration(databaseID string) (*PostgreSQLConfig, error) GetRedisConfiguration(databaseID string) (*RedisConfig, error) + + UpdateMySQLConfiguration(databaseID string, confString string) error + UpdatePostgreSQLConfiguration(databaseID string, confString string) error + UpdateRedisConfiguration(databaseID string, confString string) error + + ListTopics(string) (DatabaseTopics, error) + GetTopic(string, string) (*DatabaseTopic, error) + CreateTopic(string, *godo.DatabaseCreateTopicRequest) (*DatabaseTopic, error) + UpdateTopic(string, string, *godo.DatabaseUpdateTopicRequest) error + DeleteTopic(string, string) error } type databasesService struct { @@ -175,13 +199,13 @@ func NewDatabasesService(client *godo.Client) DatabasesService { } func (ds *databasesService) List() (Databases, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Databases.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -273,13 +297,13 @@ func (ds *databasesService) UpdateMaintenance(databaseID string, req *godo.Datab } func (ds *databasesService) ListBackups(databaseID string) (DatabaseBackups, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Databases.ListBackups(context.TODO(), databaseID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -310,13 +334,13 @@ func (ds *databasesService) GetUser(databaseID, userName string) (*DatabaseUser, } func (ds *databasesService) ListUsers(databaseID string) (DatabaseUsers, 
error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Databases.ListUsers(context.TODO(), databaseID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -361,13 +385,13 @@ func (ds *databasesService) ResetUserAuth(databaseID, userID string, req *godo.D } func (ds *databasesService) ListDBs(databaseID string) (DatabaseDBs, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Databases.ListDBs(context.TODO(), databaseID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -413,13 +437,13 @@ func (ds *databasesService) DeleteDB(databaseID, dbID string) error { } func (ds *databasesService) ListPools(databaseID string) (DatabasePools, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Databases.ListPools(context.TODO(), databaseID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -474,13 +498,13 @@ func (ds *databasesService) GetReplica(databaseID, replicaID string) (*DatabaseR } func (ds *databasesService) ListReplicas(databaseID string) (DatabaseReplicas, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Databases.ListReplicas(context.TODO(), databaseID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := 
make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -544,13 +568,13 @@ func (ds *databasesService) SetSQLMode(databaseID string, sqlModes ...string) er } func (ds *databasesService) GetFirewallRules(databaseID string) (DatabaseFirewallRules, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Databases.GetFirewallRules(context.TODO(), databaseID) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -618,3 +642,106 @@ func (ds *databasesService) GetRedisConfiguration(databaseID string) (*RedisConf RedisConfig: cfg, }, nil } + +func (ds *databasesService) UpdateMySQLConfiguration(databaseID string, confString string) error { + var conf godo.MySQLConfig + err := json.Unmarshal([]byte(confString), &conf) + if err != nil { + return err + } + + _, err = ds.client.Databases.UpdateMySQLConfig(context.TODO(), databaseID, &conf) + if err != nil { + return err + } + + return nil +} + +func (ds *databasesService) UpdatePostgreSQLConfiguration(databaseID string, confString string) error { + var conf godo.PostgreSQLConfig + err := json.Unmarshal([]byte(confString), &conf) + if err != nil { + return err + } + + _, err = ds.client.Databases.UpdatePostgreSQLConfig(context.TODO(), databaseID, &conf) + if err != nil { + return err + } + + return nil +} + +func (ds *databasesService) UpdateRedisConfiguration(databaseID string, confString string) error { + var conf godo.RedisConfig + err := json.Unmarshal([]byte(confString), &conf) + if err != nil { + return err + } + + _, err = ds.client.Databases.UpdateRedisConfig(context.TODO(), databaseID, &conf) + if err != nil { + return err + } + + return nil +} + +func (ds *databasesService) ListTopics(databaseID string) (DatabaseTopics, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, 
error) { + list, resp, err := ds.client.Databases.ListTopics(context.TODO(), databaseID, opt) + if err != nil { + return nil, nil, err + } + + si := make([]any, len(list)) + for i := range list { + si[i] = list[i] + } + + return si, resp, err + } + + si, err := PaginateResp(f) + if err != nil { + return nil, err + } + + list := make(DatabaseTopics, len(si)) + for i := range si { + t := si[i].(godo.DatabaseTopic) + list[i] = DatabaseTopic{DatabaseTopic: &t} + } + return list, nil +} + +func (ds *databasesService) CreateTopic(databaseID string, req *godo.DatabaseCreateTopicRequest) (*DatabaseTopic, error) { + t, _, err := ds.client.Databases.CreateTopic(context.TODO(), databaseID, req) + if err != nil { + return nil, err + } + + return &DatabaseTopic{DatabaseTopic: t}, nil +} + +func (ds *databasesService) UpdateTopic(databaseID, topicName string, req *godo.DatabaseUpdateTopicRequest) error { + _, err := ds.client.Databases.UpdateTopic(context.TODO(), databaseID, topicName, req) + + return err +} + +func (ds *databasesService) GetTopic(databaseID, topicName string) (*DatabaseTopic, error) { + t, _, err := ds.client.Databases.GetTopic(context.TODO(), databaseID, topicName) + if err != nil { + return nil, err + } + + return &DatabaseTopic{DatabaseTopic: t}, nil +} + +func (ds *databasesService) DeleteTopic(databaseID, topicName string) error { + _, err := ds.client.Databases.DeleteTopic(context.TODO(), databaseID, topicName) + + return err +} diff --git a/do/domains.go b/do/domains.go index 487082487..a6399c7d4 100644 --- a/do/domains.go +++ b/do/domains.go @@ -85,13 +85,13 @@ func NewDomainsService(client *godo.Client) DomainsService { } func (ds *domainsService) List() (Domains, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Domains.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) 
+ si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -137,13 +137,13 @@ func (ds *domainsService) Delete(name string) error { } func (ds *domainsService) Records(name string) (DomainRecords, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Domains.Records(context.TODO(), name, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/droplets.go b/do/droplets.go index 87f7dc139..b812977a8 100644 --- a/do/droplets.go +++ b/do/droplets.go @@ -79,13 +79,13 @@ func NewDropletsService(client *godo.Client) DropletsService { } func (ds *dropletsService) List() (Droplets, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Droplets.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -108,13 +108,13 @@ func (ds *dropletsService) List() (Droplets, error) { } func (ds *dropletsService) ListByTag(tagName string) (Droplets, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Droplets.ListByTag(context.TODO(), tagName, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -198,13 +198,13 @@ func (ds *dropletsService) DeleteByTag(tag string) error { } func (ds *dropletsService) Kernels(id int) (Kernels, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, 
*godo.Response, error) { list, resp, err := ds.client.Droplets.Kernels(context.TODO(), id, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -227,13 +227,13 @@ func (ds *dropletsService) Kernels(id int) (Kernels, error) { } func (ds *dropletsService) Snapshots(id int) (Images, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Droplets.Snapshots(context.TODO(), id, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -256,13 +256,13 @@ func (ds *dropletsService) Snapshots(id int) (Images, error) { } func (ds *dropletsService) Backups(id int) (Images, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Droplets.Backups(context.TODO(), id, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -285,13 +285,13 @@ func (ds *dropletsService) Backups(id int) (Images, error) { } func (ds *dropletsService) Actions(id int) (Actions, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ds.client.Droplets.Actions(context.TODO(), id, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/firewalls.go b/do/firewalls.go index 5b75a34f2..7d02c8bbd 100644 --- a/do/firewalls.go +++ b/do/firewalls.go @@ -83,13 +83,13 @@ func (fs *firewallsService) Update(fID string, fr 
*godo.FirewallRequest) (*Firew } func (fs *firewallsService) List() (Firewalls, error) { - listFn := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + listFn := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := fs.client.Firewalls.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -101,13 +101,13 @@ func (fs *firewallsService) List() (Firewalls, error) { } func (fs *firewallsService) ListByDroplet(dID int) (Firewalls, error) { - listFn := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + listFn := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := fs.client.Firewalls.ListByDroplet(context.TODO(), dID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -153,7 +153,7 @@ func (fs *firewallsService) RemoveRules(fID string, rr *godo.FirewallRulesReques return err } -func paginatedListHelper(listFn func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error)) (Firewalls, error) { +func paginatedListHelper(listFn func(opt *godo.ListOptions) ([]any, *godo.Response, error)) (Firewalls, error) { si, err := PaginateResp(listFn) if err != nil { return nil, err diff --git a/do/images.go b/do/images.go index 39f95e46a..472ecff24 100644 --- a/do/images.go +++ b/do/images.go @@ -113,13 +113,13 @@ func (is *imagesService) Create(icr *godo.CustomImageCreateRequest) (*Image, err type listFn func(context.Context, *godo.ListOptions) ([]godo.Image, *godo.Response, error) func (is *imagesService) listImages(lFn listFn, public bool) (Images, error) { - fn := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + fn := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := lFn(context.TODO(), opt) if err != nil { 
return nil, nil, err } - si := []interface{}{} + si := []any{} for _, i := range list { if (public && i.Public) || !i.Public { si = append(si, i) diff --git a/do/invoices.go b/do/invoices.go index c8bc649f0..f5ba2a0a8 100644 --- a/do/invoices.go +++ b/do/invoices.go @@ -64,14 +64,14 @@ func NewInvoicesService(client *godo.Client) InvoicesService { func (is *invoicesService) List() (*InvoiceList, error) { var invoicePreview godo.InvoiceListItem - listFn := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + listFn := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { invoiceList, resp, err := is.client.Invoices.List(context.Background(), opt) if err != nil { return nil, nil, err } invoicePreview = invoiceList.InvoicePreview - si := make([]interface{}, len(invoiceList.Invoices)) + si := make([]any, len(invoiceList.Invoices)) for i := range invoiceList.Invoices { si[i] = invoiceList.Invoices[i] } @@ -96,12 +96,12 @@ func (is *invoicesService) List() (*InvoiceList, error) { } func (is *invoicesService) Get(uuid string) (*Invoice, error) { - listFn := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + listFn := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { invoice, resp, err := is.client.Invoices.Get(context.Background(), uuid, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(invoice.InvoiceItems)) + si := make([]any, len(invoice.InvoiceItems)) for i := range invoice.InvoiceItems { si[i] = invoice.InvoiceItems[i] } diff --git a/do/kubernetes.go b/do/kubernetes.go index f28fb0621..c57bd32ff 100644 --- a/do/kubernetes.go +++ b/do/kubernetes.go @@ -169,13 +169,13 @@ func (k8s *kubernetesClusterService) GetUpgrades(clusterID string) (KubernetesVe } func (k8s *kubernetesClusterService) List() (KubernetesClusters, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := 
k8s.client.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, 0, len(list)) + si := make([]any, 0, len(list)) for _, item := range list { si = append(si, item) } @@ -262,13 +262,13 @@ func (k8s *kubernetesClusterService) GetNodePool(clusterID, poolID string) (*Kub } func (k8s *kubernetesClusterService) ListNodePools(clusterID string) (KubernetesNodePools, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := k8s.client.ListNodePools(context.TODO(), clusterID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, 0, len(list)) + si := make([]any, 0, len(list)) for _, item := range list { si = append(si, item) } diff --git a/do/load_balancers.go b/do/load_balancers.go index 5756dcce9..1118f67ed 100644 --- a/do/load_balancers.go +++ b/do/load_balancers.go @@ -63,13 +63,13 @@ func (lbs *loadBalancersService) Get(lbID string) (*LoadBalancer, error) { } func (lbs *loadBalancersService) List() (LoadBalancers, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := lbs.client.LoadBalancers.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/mocks/DatabasesService.go b/do/mocks/DatabasesService.go index 518c339aa..452c28a20 100644 --- a/do/mocks/DatabasesService.go +++ b/do/mocks/DatabasesService.go @@ -99,6 +99,21 @@ func (mr *MockDatabasesServiceMockRecorder) CreateReplica(arg0, arg1 any) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateReplica", reflect.TypeOf((*MockDatabasesService)(nil).CreateReplica), arg0, arg1) } +// CreateTopic mocks base method. 
+func (m *MockDatabasesService) CreateTopic(arg0 string, arg1 *godo.DatabaseCreateTopicRequest) (*do.DatabaseTopic, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateTopic", arg0, arg1) + ret0, _ := ret[0].(*do.DatabaseTopic) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// CreateTopic indicates an expected call of CreateTopic. +func (mr *MockDatabasesServiceMockRecorder) CreateTopic(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateTopic", reflect.TypeOf((*MockDatabasesService)(nil).CreateTopic), arg0, arg1) +} + // CreateUser mocks base method. func (m *MockDatabasesService) CreateUser(arg0 string, arg1 *godo.DatabaseCreateUserRequest) (*do.DatabaseUser, error) { m.ctrl.T.Helper() @@ -170,6 +185,20 @@ func (mr *MockDatabasesServiceMockRecorder) DeleteReplica(arg0, arg1 any) *gomoc return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteReplica", reflect.TypeOf((*MockDatabasesService)(nil).DeleteReplica), arg0, arg1) } +// DeleteTopic mocks base method. +func (m *MockDatabasesService) DeleteTopic(arg0, arg1 string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "DeleteTopic", arg0, arg1) + ret0, _ := ret[0].(error) + return ret0 +} + +// DeleteTopic indicates an expected call of DeleteTopic. +func (mr *MockDatabasesServiceMockRecorder) DeleteTopic(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "DeleteTopic", reflect.TypeOf((*MockDatabasesService)(nil).DeleteTopic), arg0, arg1) +} + // DeleteUser mocks base method. func (m *MockDatabasesService) DeleteUser(arg0, arg1 string) error { m.ctrl.T.Helper() @@ -364,6 +393,21 @@ func (mr *MockDatabasesServiceMockRecorder) GetSQLMode(arg0 any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetSQLMode", reflect.TypeOf((*MockDatabasesService)(nil).GetSQLMode), arg0) } +// GetTopic mocks base method. 
+func (m *MockDatabasesService) GetTopic(arg0, arg1 string) (*do.DatabaseTopic, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetTopic", arg0, arg1) + ret0, _ := ret[0].(*do.DatabaseTopic) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetTopic indicates an expected call of GetTopic. +func (mr *MockDatabasesServiceMockRecorder) GetTopic(arg0, arg1 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetTopic", reflect.TypeOf((*MockDatabasesService)(nil).GetTopic), arg0, arg1) +} + // GetUser mocks base method. func (m *MockDatabasesService) GetUser(arg0, arg1 string) (*do.DatabaseUser, error) { m.ctrl.T.Helper() @@ -469,6 +513,21 @@ func (mr *MockDatabasesServiceMockRecorder) ListReplicas(arg0 any) *gomock.Call return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListReplicas", reflect.TypeOf((*MockDatabasesService)(nil).ListReplicas), arg0) } +// ListTopics mocks base method. +func (m *MockDatabasesService) ListTopics(arg0 string) (do.DatabaseTopics, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "ListTopics", arg0) + ret0, _ := ret[0].(do.DatabaseTopics) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// ListTopics indicates an expected call of ListTopics. +func (mr *MockDatabasesServiceMockRecorder) ListTopics(arg0 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListTopics", reflect.TypeOf((*MockDatabasesService)(nil).ListTopics), arg0) +} + // ListUsers mocks base method. func (m *MockDatabasesService) ListUsers(arg0 string) (do.DatabaseUsers, error) { m.ctrl.T.Helper() @@ -587,3 +646,59 @@ func (mr *MockDatabasesServiceMockRecorder) UpdateMaintenance(arg0, arg1 any) *g mr.mock.ctrl.T.Helper() return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMaintenance", reflect.TypeOf((*MockDatabasesService)(nil).UpdateMaintenance), arg0, arg1) } + +// UpdateMySQLConfiguration mocks base method. 
+func (m *MockDatabasesService) UpdateMySQLConfiguration(databaseID, confString string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateMySQLConfiguration", databaseID, confString) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateMySQLConfiguration indicates an expected call of UpdateMySQLConfiguration. +func (mr *MockDatabasesServiceMockRecorder) UpdateMySQLConfiguration(databaseID, confString any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateMySQLConfiguration", reflect.TypeOf((*MockDatabasesService)(nil).UpdateMySQLConfiguration), databaseID, confString) +} + +// UpdatePostgreSQLConfiguration mocks base method. +func (m *MockDatabasesService) UpdatePostgreSQLConfiguration(databaseID, confString string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdatePostgreSQLConfiguration", databaseID, confString) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdatePostgreSQLConfiguration indicates an expected call of UpdatePostgreSQLConfiguration. +func (mr *MockDatabasesServiceMockRecorder) UpdatePostgreSQLConfiguration(databaseID, confString any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdatePostgreSQLConfiguration", reflect.TypeOf((*MockDatabasesService)(nil).UpdatePostgreSQLConfiguration), databaseID, confString) +} + +// UpdateRedisConfiguration mocks base method. +func (m *MockDatabasesService) UpdateRedisConfiguration(databaseID, confString string) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateRedisConfiguration", databaseID, confString) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateRedisConfiguration indicates an expected call of UpdateRedisConfiguration. 
+func (mr *MockDatabasesServiceMockRecorder) UpdateRedisConfiguration(databaseID, confString any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateRedisConfiguration", reflect.TypeOf((*MockDatabasesService)(nil).UpdateRedisConfiguration), databaseID, confString) +} + +// UpdateTopic mocks base method. +func (m *MockDatabasesService) UpdateTopic(arg0, arg1 string, arg2 *godo.DatabaseUpdateTopicRequest) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "UpdateTopic", arg0, arg1, arg2) + ret0, _ := ret[0].(error) + return ret0 +} + +// UpdateTopic indicates an expected call of UpdateTopic. +func (mr *MockDatabasesServiceMockRecorder) UpdateTopic(arg0, arg1, arg2 any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "UpdateTopic", reflect.TypeOf((*MockDatabasesService)(nil).UpdateTopic), arg0, arg1, arg2) +} diff --git a/do/monitoring.go b/do/monitoring.go index ef118679b..c29bd1b7c 100644 --- a/do/monitoring.go +++ b/do/monitoring.go @@ -50,13 +50,13 @@ func NewMonitoringService(godoClient *godo.Client) MonitoringService { } func (ms *monitoringService) ListAlertPolicies() (AlertPolicies, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ms.client.Monitoring.ListAlertPolicies(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/pagination.go b/do/pagination.go index 26971aac3..227b8c315 100644 --- a/do/pagination.go +++ b/do/pagination.go @@ -29,12 +29,12 @@ var perPage = 200 var fetchFn = fetchPage type paginatedList struct { - list [][]interface{} + list [][]any total int mu sync.Mutex } -func (pl *paginatedList) set(page int, items []interface{}) { +func (pl *paginatedList) set(page int, items []any) { 
pl.mu.Lock() defer pl.mu.Unlock() pl.total += len(items) @@ -42,10 +42,10 @@ func (pl *paginatedList) set(page int, items []interface{}) { } // Generator is a function that generates the list to be paginated. -type Generator func(*godo.ListOptions) ([]interface{}, *godo.Response, error) +type Generator func(*godo.ListOptions) ([]any, *godo.Response, error) // PaginateResp paginates a Response. -func PaginateResp(gen Generator) ([]interface{}, error) { +func PaginateResp(gen Generator) ([]any, error) { opt := &godo.ListOptions{Page: 1, PerPage: perPage} // fetch first page to get page count (x) @@ -61,7 +61,7 @@ func PaginateResp(gen Generator) ([]interface{}, error) { } l := paginatedList{ - list: make([][]interface{}, lp), + list: make([][]any, lp), } // set results from the first page @@ -93,7 +93,7 @@ func PaginateResp(gen Generator) ([]interface{}, error) { wg.Wait() // flatten paginated list - items := make([]interface{}, l.total)[:0] + items := make([]any, l.total)[:0] for _, page := range l.list { if page == nil { // must have been an error getting page results @@ -107,7 +107,7 @@ func PaginateResp(gen Generator) ([]interface{}, error) { return items, nil } -func fetchPage(gen Generator, page int) ([]interface{}, error) { +func fetchPage(gen Generator, page int) ([]any, error) { opt := &godo.ListOptions{Page: page, PerPage: perPage} items, _, err := gen(opt) return items, err diff --git a/do/pagination_test.go b/do/pagination_test.go index e267ecb7a..b7fc3ada2 100644 --- a/do/pagination_test.go +++ b/do/pagination_test.go @@ -26,11 +26,11 @@ func Test_PaginateResp(t *testing.T) { currentPage := 0 resp := &godo.Response{Links: &godo.Links{Pages: &godo.Pages{Last: "http://example.com/?page=5"}}} - gen := func(*godo.ListOptions) ([]interface{}, *godo.Response, error) { + gen := func(*godo.ListOptions) ([]any, *godo.Response, error) { mu.Lock() defer mu.Unlock() currentPage++ - return []interface{}{currentPage}, resp, nil + return []any{currentPage}, resp, nil } 
list, err := PaginateResp(gen) @@ -40,8 +40,8 @@ func Test_PaginateResp(t *testing.T) { } func Test_Pagination_fetchPage(t *testing.T) { - gen := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { - items := []interface{}{} + gen := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { + items := []any{} resp := &godo.Response{} assert.Equal(t, 10, opt.Page) diff --git a/do/projects.go b/do/projects.go index f4124aebe..de72b85f9 100644 --- a/do/projects.go +++ b/do/projects.go @@ -66,13 +66,13 @@ func NewProjectsService(client *godo.Client) ProjectsService { // List projects. func (ps *projectsService) List() (Projects, error) { - listFn := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + listFn := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ps.client.Projects.List(ps.ctx, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -125,13 +125,13 @@ func (ps *projectsService) Delete(projectUUID string) error { } func (ps *projectsService) ListResources(projectUUID string) (ProjectResources, error) { - listFn := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + listFn := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ps.client.Projects.ListResources(ps.ctx, projectUUID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -143,7 +143,7 @@ func (ps *projectsService) ListResources(projectUUID string) (ProjectResources, } func (ps *projectsService) AssignResources(projectUUID string, resources []string) (ProjectResources, error) { - assignableResources := make([]interface{}, len(resources)) + assignableResources := make([]any, len(resources)) for i, resource := range resources { assignableResources[i] = resource } @@ -161,7 +161,7 @@ 
func (ps *projectsService) AssignResources(projectUUID string, resources []strin return prs, err } -func projectsPaginatedListHelper(listFn func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error)) (Projects, error) { +func projectsPaginatedListHelper(listFn func(opt *godo.ListOptions) ([]any, *godo.Response, error)) (Projects, error) { si, err := PaginateResp(listFn) if err != nil { return nil, err @@ -180,7 +180,7 @@ func projectsPaginatedListHelper(listFn func(opt *godo.ListOptions) ([]interface return list, nil } -func projectResourcesPaginatedListHelper(listFn func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error)) (ProjectResources, error) { +func projectResourcesPaginatedListHelper(listFn func(opt *godo.ListOptions) ([]any, *godo.Response, error)) (ProjectResources, error) { si, err := PaginateResp(listFn) if err != nil { return nil, err diff --git a/do/regions.go b/do/regions.go index dc33ac8e8..669a6fc83 100644 --- a/do/regions.go +++ b/do/regions.go @@ -46,13 +46,13 @@ func NewRegionsService(client *godo.Client) RegionsService { } func (rs *regionsService) List() (Regions, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := rs.client.Regions.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/registry.go b/do/registry.go index 429b398c7..a5174b80b 100644 --- a/do/registry.go +++ b/do/registry.go @@ -137,13 +137,13 @@ func (rs *registryService) DockerCredentials(request *godo.RegistryDockerCredent } func (rs *registryService) ListRepositories(registry string) ([]Repository, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := 
rs.client.Registry.ListRepositories(rs.ctx, registry, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -166,7 +166,7 @@ func (rs *registryService) ListRepositories(registry string) ([]Repository, erro } func (rs *registryService) ListRepositoriesV2(registry string) ([]RepositoryV2, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := rs.client.Registry.ListRepositoriesV2(rs.ctx, registry, &godo.TokenListOptions{ Page: opt.Page, PerPage: opt.PerPage, @@ -176,7 +176,7 @@ func (rs *registryService) ListRepositoriesV2(registry string) ([]RepositoryV2, return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -199,13 +199,13 @@ func (rs *registryService) ListRepositoriesV2(registry string) ([]RepositoryV2, } func (rs *registryService) ListRepositoryTags(registry, repository string) ([]RepositoryTag, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := rs.client.Registry.ListRepositoryTags(rs.ctx, registry, repository, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -228,13 +228,13 @@ func (rs *registryService) ListRepositoryTags(registry, repository string) ([]Re } func (rs *registryService) ListRepositoryManifests(registry, repository string) ([]RepositoryManifest, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := rs.client.Registry.ListRepositoryManifests(rs.ctx, registry, repository, opt) if err != nil { return nil, nil, err 
} - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -285,13 +285,13 @@ func (rs *registryService) GetGarbageCollection(registry string) (*GarbageCollec } func (rs *registryService) ListGarbageCollections(registry string) ([]GarbageCollection, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := rs.client.Registry.ListGarbageCollections(rs.ctx, registry, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/reserved_ip_actions.go b/do/reserved_ip_actions.go index 3284de07d..cadde110e 100644 --- a/do/reserved_ip_actions.go +++ b/do/reserved_ip_actions.go @@ -69,13 +69,13 @@ func (fia *reservedIPActionsService) Get(ip string, actionID int) (*Action, erro } func (fia *reservedIPActionsService) List(ip string, opt *godo.ListOptions) ([]Action, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := fia.client.ReservedIPActions.List(context.TODO(), ip, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/reserved_ips.go b/do/reserved_ips.go index df4b03e2d..b6e29a432 100644 --- a/do/reserved_ips.go +++ b/do/reserved_ips.go @@ -49,13 +49,13 @@ func NewReservedIPsService(client *godo.Client) ReservedIPsService { } func (fis *reservedIPsService) List() (ReservedIPs, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := fis.client.ReservedIPs.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, 
len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/serverless.go b/do/serverless.go index 52ca94a20..594fd2728 100644 --- a/do/serverless.go +++ b/do/serverless.go @@ -129,19 +129,19 @@ type ServerlessProject struct { // ServerlessSpec describes a project.yml spec // reference: https://docs.nimbella.com/configuration/ type ServerlessSpec struct { - Parameters map[string]interface{} `json:"parameters,omitempty"` - Environment map[string]interface{} `json:"environment,omitempty"` - Packages []*ServerlessPackage `json:"packages,omitempty"` + Parameters map[string]any `json:"parameters,omitempty"` + Environment map[string]any `json:"environment,omitempty"` + Packages []*ServerlessPackage `json:"packages,omitempty"` } // ServerlessPackage ... type ServerlessPackage struct { - Name string `json:"name,omitempty"` - Shared bool `json:"shared,omitempty"` - Environment map[string]interface{} `json:"environment,omitempty"` - Parameters map[string]interface{} `json:"parameters,omitempty"` - Annotations map[string]interface{} `json:"annotations,omitempty"` - Functions []*ServerlessFunction `json:"functions,omitempty"` + Name string `json:"name,omitempty"` + Shared bool `json:"shared,omitempty"` + Environment map[string]any `json:"environment,omitempty"` + Parameters map[string]any `json:"parameters,omitempty"` + Annotations map[string]any `json:"annotations,omitempty"` + Functions []*ServerlessFunction `json:"functions,omitempty"` } // ServerlessFunction ... @@ -152,12 +152,12 @@ type ServerlessFunction struct { Runtime string `json:"runtime,omitempty"` // `web` can be either true or "raw". We use interface{} to support both types. If we start consuming the value we // should probably define a custom type with proper validation. 
- Web interface{} `json:"web,omitempty"` - WebSecure interface{} `json:"webSecure,omitempty" yaml:"webSecure"` - Parameters map[string]interface{} `json:"parameters,omitempty"` - Environment map[string]interface{} `json:"environment,omitempty"` - Annotations map[string]interface{} `json:"annotations,omitempty"` - Limits map[string]int `json:"limits,omitempty"` + Web any `json:"web,omitempty"` + WebSecure any `json:"webSecure,omitempty" yaml:"webSecure"` + Parameters map[string]any `json:"parameters,omitempty"` + Environment map[string]any `json:"environment,omitempty"` + Annotations map[string]any `json:"annotations,omitempty"` + Limits map[string]int `json:"limits,omitempty"` } // ProjectMetadata describes the nim project:get-metadata output structure. @@ -195,8 +195,8 @@ type ServerlessTrigger struct { } type TriggerScheduledDetails struct { - Cron string `json:"cron,omitempty"` - Body map[string]interface{} `json:"body,omitempty"` + Cron string `json:"cron,omitempty"` + Body map[string]any `json:"body,omitempty"` } type TriggerScheduledRuns struct { @@ -231,8 +231,8 @@ type ServerlessService interface { GetFunction(string, bool) (whisk.Action, []FunctionParameter, error) ListFunctions(string, int, int) ([]whisk.Action, error) DeleteFunction(string, bool) error - InvokeFunction(string, interface{}, bool, bool) (interface{}, error) - InvokeFunctionViaWeb(string, interface{}) error + InvokeFunction(string, any, bool, bool) (any, error) + InvokeFunctionViaWeb(string, any) error ListActivations(whisk.ActivationListOptions) ([]whisk.Activation, error) GetActivationCount(whisk.ActivationCountOptions) (whisk.ActivationCount, error) GetActivation(string) (whisk.Activation, error) @@ -315,11 +315,11 @@ var ( // ServerlessOutput contains the output returned from calls to the sandbox plugin. 
type ServerlessOutput struct { - Table []map[string]interface{} `json:"table,omitempty"` - Captured []string `json:"captured,omitempty"` - Formatted []string `json:"formatted,omitempty"` - Entity interface{} `json:"entity,omitempty"` - Error string `json:"error,omitempty"` + Table []map[string]any `json:"table,omitempty"` + Captured []string `json:"captured,omitempty"` + Formatted []string `json:"formatted,omitempty"` + Entity any `json:"entity,omitempty"` + Error string `json:"error,omitempty"` } // NewServerlessService returns a configured ServerlessService. @@ -898,8 +898,8 @@ func (s *serverlessService) DeletePackage(name string, recursive bool) error { } // InvokeFunction invokes a function via POST with authentication -func (s *serverlessService) InvokeFunction(name string, params interface{}, blocking bool, result bool) (interface{}, error) { - var empty map[string]interface{} +func (s *serverlessService) InvokeFunction(name string, params any, blocking bool, result bool) (any, error) { + var empty map[string]any err := initWhisk(s) if err != nil { return empty, err @@ -909,7 +909,7 @@ func (s *serverlessService) InvokeFunction(name string, params interface{}, bloc } // InvokeFunctionViaWeb invokes a function via GET using its web URL (or error if not a web function) -func (s *serverlessService) InvokeFunctionViaWeb(name string, params interface{}) error { +func (s *serverlessService) InvokeFunctionViaWeb(name string, params any) error { // Get the function so we can use its metadata in formulating the request theFunction, _, err := s.GetFunction(name, false) if err != nil { @@ -941,7 +941,7 @@ func (s *serverlessService) InvokeFunctionViaWeb(name string, params interface{} // Add params, if any if params != nil { encoded := url.Values{} - for key, val := range params.(map[string]interface{}) { + for key, val := range params.(map[string]any) { stringVal, ok := val.(string) if !ok { return fmt.Errorf("the value of '%s' is not a string; web invocation is not 
possible", key) diff --git a/do/sizes.go b/do/sizes.go index 801dbd4a1..9cfbd1e96 100644 --- a/do/sizes.go +++ b/do/sizes.go @@ -46,13 +46,13 @@ func NewSizesService(client *godo.Client) SizesService { } func (rs *sizesService) List() (Sizes, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := rs.client.Sizes.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/snapshots.go b/do/snapshots.go index 72d45383a..60542dbbc 100644 --- a/do/snapshots.go +++ b/do/snapshots.go @@ -50,13 +50,13 @@ func NewSnapshotsService(client *godo.Client) SnapshotsService { } func (ss *snapshotsService) List() (Snapshots, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ss.client.Snapshots.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -79,13 +79,13 @@ func (ss *snapshotsService) List() (Snapshots, error) { } func (ss *snapshotsService) ListVolume() (Snapshots, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ss.client.Snapshots.ListVolume(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -108,13 +108,13 @@ func (ss *snapshotsService) ListVolume() (Snapshots, error) { } func (ss *snapshotsService) ListDroplet() (Snapshots, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) 
([]any, *godo.Response, error) { list, resp, err := ss.client.Snapshots.ListDroplet(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/sshkeys.go b/do/sshkeys.go index e92f0f112..1086e8822 100644 --- a/do/sshkeys.go +++ b/do/sshkeys.go @@ -52,13 +52,13 @@ func NewKeysService(client *godo.Client) KeysService { } func (ks *keysService) List() (SSHKeys, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ks.client.Keys.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/tags.go b/do/tags.go index d7b7ba30d..6132d5714 100644 --- a/do/tags.go +++ b/do/tags.go @@ -51,13 +51,13 @@ func NewTagsService(godoClient *godo.Client) TagsService { } func (ts *tagsService) List() (Tags, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ts.client.Tags.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/uptime_checks.go b/do/uptime_checks.go index a150efe33..3161cbba9 100644 --- a/do/uptime_checks.go +++ b/do/uptime_checks.go @@ -71,13 +71,13 @@ func (ucs *uptimeChecksService) Create(req *godo.CreateUptimeCheckRequest) (*Upt } func (ucs *uptimeChecksService) List() ([]UptimeCheck, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ucs.client.UptimeChecks.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si 
:= make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } @@ -136,13 +136,13 @@ func (ucs *uptimeChecksService) CreateAlert(id string, req *godo.CreateUptimeAle } func (ucs *uptimeChecksService) ListAlerts(id string) ([]UptimeAlert, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := ucs.client.UptimeChecks.ListAlerts(context.TODO(), id, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/volume_actions.go b/do/volume_actions.go index ce93e79ac..fc9fca6d4 100644 --- a/do/volume_actions.go +++ b/do/volume_actions.go @@ -55,13 +55,13 @@ func (vas *volumeActionsService) Get(volumeID string, actionID int) (*Action, er } func (vas *volumeActionsService) List(volumeID string) ([]Action, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := vas.client.StorageActions.List(context.TODO(), volumeID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/volumes.go b/do/volumes.go index f3ab789a9..b1fa8f5e3 100644 --- a/do/volumes.go +++ b/do/volumes.go @@ -37,14 +37,14 @@ func NewVolumesService(godoClient *godo.Client) VolumesService { } func (a *volumesService) List() ([]Volume, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { params := &godo.ListVolumeParams{ListOptions: opt} list, resp, err := a.client.Storage.ListVolumes(context.TODO(), params) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := 
range list { si[i] = list[i] } @@ -111,13 +111,13 @@ func (a *volumesService) DeleteSnapshot(snapshotID string) error { } func (a *volumesService) ListSnapshots(volumeID string, opt *godo.ListOptions) ([]Snapshot, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := a.client.Storage.ListSnapshots(context.TODO(), volumeID, opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/do/vpcs.go b/do/vpcs.go index 05acfdf1a..be85bb033 100644 --- a/do/vpcs.go +++ b/do/vpcs.go @@ -60,13 +60,13 @@ func (v *vpcsService) Get(vpcUUID string) (*VPC, error) { } func (v *vpcsService) List() (VPCs, error) { - f := func(opt *godo.ListOptions) ([]interface{}, *godo.Response, error) { + f := func(opt *godo.ListOptions) ([]any, *godo.Response, error) { list, resp, err := v.client.VPCs.List(context.TODO(), opt) if err != nil { return nil, nil, err } - si := make([]interface{}, len(list)) + si := make([]any, len(list)) for i := range list { si[i] = list[i] } diff --git a/doit.go b/doit.go index e31873c60..bbf7e0600 100644 --- a/doit.go +++ b/doit.go @@ -191,7 +191,7 @@ func (glv *GithubLatestVersioner) LatestVersion() (string, error) { defer res.Body.Close() - var m map[string]interface{} + var m map[string]any if err = json.NewDecoder(res.Body).Decode(&m); err != nil { return "", err } @@ -211,7 +211,7 @@ type Config interface { GetDockerEngineClient() (builder.DockerEngineClient, error) SSH(user, host, keyPath string, port int, opts ssh.Options) runner.Runner Listen(url *url.URL, token string, schemaFunc listen.SchemaFunc, out io.Writer) listen.ListenerService - Set(ns, key string, val interface{}) + Set(ns, key string, val any) IsSet(key string) bool GetString(ns, key string) (string, error) GetBool(ns, key string) (bool, error) @@ -329,7 +329,7 @@ func (c 
*LiveConfig) Listen(url *url.URL, token string, schemaFunc listen.Schema } // Set sets a config key. -func (c *LiveConfig) Set(ns, key string, val interface{}) { +func (c *LiveConfig) Set(ns, key string, val any) { viper.Set(nskey(ns, key), val) } @@ -517,7 +517,7 @@ func (c *TestConfig) Listen(url *url.URL, token string, schemaFunc listen.Schema } // Set sets a config key. -func (c *TestConfig) Set(ns, key string, val interface{}) { +func (c *TestConfig) Set(ns, key string, val any) { nskey := nskey(ns, key) c.v.Set(nskey, val) c.IsSetMap[key] = true diff --git a/integration/database_config_get_test.go b/integration/database_config_get_test.go new file mode 100644 index 000000000..298bb2919 --- /dev/null +++ b/integration/database_config_get_test.go @@ -0,0 +1,261 @@ +package integration + +import ( + "fmt" + "net/http" + "net/http/httptest" + "net/http/httputil" + "os/exec" + "strings" + "testing" + + "github.com/sclevine/spec" + "github.com/stretchr/testify/require" +) + +var _ = suite("database/config/get", func(t *testing.T, when spec.G, it spec.S) { + var ( + expect *require.Assertions + server *httptest.Server + ) + + it.Before(func() { + expect = require.New(t) + + server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + switch req.URL.Path { + case "/v2/databases/mysql-database-id/config": + auth := req.Header.Get("Authorization") + if auth != "Bearer some-magic-token" { + w.WriteHeader(http.StatusTeapot) + } + + if req.Method != http.MethodGet { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + w.Write([]byte(databaseConfigMySQLGetResponse)) + case "/v2/databases/pg-database-id/config": + auth := req.Header.Get("Authorization") + if auth != "Bearer some-magic-token" { + w.WriteHeader(http.StatusTeapot) + } + + if req.Method != http.MethodGet { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + w.Write([]byte(databaseConfigPGGetResponse)) + case "/v2/databases/redis-database-id/config": + auth 
:= req.Header.Get("Authorization") + if auth != "Bearer some-magic-token" { + w.WriteHeader(http.StatusTeapot) + } + + if req.Method != http.MethodGet { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + w.Write([]byte(databaseConfigRedisGetResponse)) + default: + dump, err := httputil.DumpRequest(req, true) + if err != nil { + t.Fatal("failed to dump request") + } + + t.Fatalf("received unknown request: %s", dump) + } + })) + }) + + when("all required flags are passed", func() { + it("gets the mysql database config", func() { + cmd := exec.Command(builtBinaryPath, + "-t", "some-magic-token", + "-u", server.URL, + "database", + "configuration", + "get", + "--engine", "mysql", + "mysql-database-id", + ) + + output, err := cmd.CombinedOutput() + expect.NoError(err, fmt.Sprintf("received error output: %s", output)) + expect.Equal(strings.TrimSpace(databaseConfigMySQLGetOutput), strings.TrimSpace(string(output))) + }) + }) + + when("all required flags are passed", func() { + it("gets the pg database config", func() { + cmd := exec.Command(builtBinaryPath, + "-t", "some-magic-token", + "-u", server.URL, + "database", + "configuration", + "get", + "--engine", "pg", + "pg-database-id", + ) + + output, err := cmd.CombinedOutput() + expect.NoError(err, fmt.Sprintf("received error output: %s", output)) + expect.Equal(strings.TrimSpace(databaseConfigPGGetOutput), strings.TrimSpace(string(output))) + }) + }) + + when("all required flags are passed", func() { + it("gets the redis database config", func() { + cmd := exec.Command(builtBinaryPath, + "-t", "some-magic-token", + "-u", server.URL, + "database", + "configuration", + "get", + "--engine", "redis", + "redis-database-id", + ) + + output, err := cmd.CombinedOutput() + expect.NoError(err, fmt.Sprintf("received error output: %s", output)) + expect.Equal(strings.TrimSpace(databaseConfigRedisGetOutput), strings.TrimSpace(string(output))) + }) + }) +}) + +const ( + databaseConfigMySQLGetOutput = ` +key value 
+DefaultTimeZone UTC +MaxAllowedPacket 67108864 +SQLMode ANSI,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION,NO_ZERO_DATE,NO_ZERO_IN_DATE,STRICT_ALL_TABLES +SQLRequirePrimaryKey true +InnodbFtMinTokenSize 3 +InnodbFtServerStopwordTable +InnodbPrintAllDeadlocks false +InnodbRollbackOnTimeout false +SlowQueryLog false +LongQueryTime 10 +BackupHour 18 +BackupMinute 3 +` + + databaseConfigMySQLGetResponse = ` +{ + "config": { + "default_time_zone": "UTC", + "max_allowed_packet": 67108864, + "sql_mode": "ANSI,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION,NO_ZERO_DATE,NO_ZERO_IN_DATE,STRICT_ALL_TABLES", + "sql_require_primary_key": true, + "innodb_change_buffer_max_size": 25, + "innodb_flush_neighbors": 1, + "innodb_ft_min_token_size": 3, + "innodb_ft_server_stopword_table": "", + "innodb_print_all_deadlocks": false, + "innodb_read_io_threads": 4, + "innodb_rollback_on_timeout": false, + "innodb_thread_concurrency": 0, + "innodb_write_io_threads": 4, + "net_buffer_length": 16384, + "slow_query_log": false, + "long_query_time": 10, + "backup_hour": 18, + "backup_minute": 3 + } +}` + + databaseConfigPGGetOutput = ` +key value +AutovacuumNaptime 60 +AutovacuumVacuumThreshold 50 +AutovacuumAnalyzeThreshold 50 +AutovacuumVacuumScaleFactor 0.2 +AutovacuumAnalyzeScaleFactor 0.2 +AutovacuumVacuumCostDelay 20 +AutovacuumVacuumCostLimit -1 +BGWriterFlushAfter 512 +BGWriterLRUMaxpages 100 +BGWriterLRUMultiplier 2 +IdleInTransactionSessionTimeout 0 +JIT true +LogAutovacuumMinDuration -1 +LogMinDurationStatement -1 +MaxPreparedTransactions 0 +MaxParallelWorkers 8 +MaxParallelWorkersPerGather 2 +TempFileLimit -1 +WalSenderTimeout 60000 +PgBouncer.ServerResetQueryAlways false +PgBouncer.MinPoolSize 0 +PgBouncer.ServerIdleTimeout 0 +PgBouncer.AutodbPoolSize 0 +PgBouncer.AutodbMaxDbConnections 0 +PgBouncer.AutodbIdleTimeout 0 +BackupHour 18 +BackupMinute 26` + + databaseConfigPGGetResponse = `{ +"config": { + "autovacuum_naptime": 60, + "autovacuum_vacuum_threshold": 50, + 
"autovacuum_analyze_threshold": 50, + "autovacuum_vacuum_scale_factor": 0.2, + "autovacuum_analyze_scale_factor": 0.2, + "autovacuum_vacuum_cost_delay": 20, + "autovacuum_vacuum_cost_limit": -1, + "bgwriter_flush_after": 512, + "bgwriter_lru_maxpages": 100, + "bgwriter_lru_multiplier": 2, + "idle_in_transaction_session_timeout": 0, + "jit": true, + "log_autovacuum_min_duration": -1, + "log_min_duration_statement": -1, + "max_prepared_transactions": 0, + "max_parallel_workers": 8, + "max_parallel_workers_per_gather": 2, + "temp_file_limit": -1, + "wal_sender_timeout": 60000, + "pgbouncer": { + "server_reset_query_always": false, + "min_pool_size": 0, + "server_idle_timeout": 0, + "autodb_pool_size": 0, + "autodb_max_db_connections": 0, + "autodb_idle_timeout": 0 + }, + "backup_hour": 18, + "backup_minute": 26, + "timescaledb": {}, + "stat_monitor_enable": false +} +}` + + databaseConfigRedisGetOutput = ` +key value +RedisMaxmemoryPolicy volatile-lru +RedisLFULogFactor 10 +RedisLFUDecayTime 1 +RedisSSL true +RedisTimeout 600 +RedisNotifyKeyspaceEvents +RedisPersistence rdb +RedisACLChannelsDefault allchannels +` + + databaseConfigRedisGetResponse = `{ + "config": { + "redis_maxmemory_policy": "volatile-lru", + "redis_lfu_log_factor": 10, + "redis_lfu_decay_time": 1, + "redis_ssl": true, + "redis_timeout": 600, + "redis_notify_keyspace_events": "", + "redis_persistence": "rdb", + "redis_acl_channels_default": "allchannels" + } + } +` +) diff --git a/integration/database_config_update_test.go b/integration/database_config_update_test.go new file mode 100644 index 000000000..83793a496 --- /dev/null +++ b/integration/database_config_update_test.go @@ -0,0 +1,155 @@ +package integration + +import ( + "fmt" + "io" + "net/http" + "net/http/httptest" + "net/http/httputil" + "os/exec" + "strings" + "testing" + + "github.com/sclevine/spec" + "github.com/stretchr/testify/require" +) + +var _ = suite("database/config/get", func(t *testing.T, when spec.G, it spec.S) { + var ( + 
expect *require.Assertions + server *httptest.Server + ) + + it.Before(func() { + expect = require.New(t) + + server = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + switch req.URL.Path { + case "/v2/databases/mysql-database-id/config": + auth := req.Header.Get("Authorization") + if auth != "Bearer some-magic-token" { + w.WriteHeader(http.StatusTeapot) + } + + if req.Method != http.MethodPatch { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + expected := `{"config":{"sql_mode":"ANSI"}}` + b, err := io.ReadAll(req.Body) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + expect.Equal(expected, strings.TrimSpace(string(b))) + + w.WriteHeader(http.StatusOK) + case "/v2/databases/pg-database-id/config": + auth := req.Header.Get("Authorization") + if auth != "Bearer some-magic-token" { + w.WriteHeader(http.StatusTeapot) + } + + if req.Method != http.MethodPatch { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + expected := `{"config":{"pgbouncer":{"server_reset_query_always":false}}}` + b, err := io.ReadAll(req.Body) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + expect.Equal(expected, strings.TrimSpace(string(b))) + + w.WriteHeader(http.StatusOK) + case "/v2/databases/redis-database-id/config": + auth := req.Header.Get("Authorization") + if auth != "Bearer some-magic-token" { + w.WriteHeader(http.StatusTeapot) + } + + if req.Method != http.MethodPatch { + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + expected := `{"config":{"redis_timeout":1200}}` + b, err := io.ReadAll(req.Body) + if err != nil { + w.WriteHeader(http.StatusInternalServerError) + return + } + expect.Equal(expected, strings.TrimSpace(string(b))) + + w.WriteHeader(http.StatusOK) + default: + dump, err := httputil.DumpRequest(req, true) + if err != nil { + t.Fatal("failed to dump request") + } + + t.Fatalf("received unknown request: %s", dump) + } + })) + }) + + 
when("all required flags are passed", func() { + it("updates the mysql database config", func() { + cmd := exec.Command(builtBinaryPath, + "-t", "some-magic-token", + "-u", server.URL, + "database", + "configuration", + "update", + "--engine", "mysql", + "mysql-database-id", + "--config-json", `{"sql_mode": "ANSI"}`, + ) + + output, err := cmd.CombinedOutput() + expect.NoError(err, fmt.Sprintf("received error output: %s", output)) + expect.Empty(strings.TrimSpace(string(output))) + }) + }) + + when("all required flags are passed", func() { + it("updates the pg database config", func() { + cmd := exec.Command(builtBinaryPath, + "-t", "some-magic-token", + "-u", server.URL, + "database", + "configuration", + "update", + "--engine", "pg", + "pg-database-id", + "--config-json", `{"pgbouncer":{"server_reset_query_always": false}}`, + ) + + output, err := cmd.CombinedOutput() + expect.NoError(err, fmt.Sprintf("received error output: %s", output)) + expect.Empty(strings.TrimSpace(string(output))) + }) + }) + + when("all required flags are passed", func() { + it("updates the redis database config", func() { + cmd := exec.Command(builtBinaryPath, + "-t", "some-magic-token", + "-u", server.URL, + "database", + "configuration", + "update", + "--engine", "redis", + "redis-database-id", + "--config-json", `{"redis_timeout":1200}`, + ) + + output, err := cmd.CombinedOutput() + expect.NoError(err, fmt.Sprintf("received error output: %s", output)) + expect.Empty(strings.TrimSpace(string(output))) + }) + }) +}) diff --git a/integration/database_connection_test.go b/integration/database_connection_test.go index 4b70df3e1..13c71cdb6 100644 --- a/integration/database_connection_test.go +++ b/integration/database_connection_test.go @@ -13,7 +13,7 @@ import ( "github.com/stretchr/testify/require" ) -var _ = suite.Focus("database/connection", func(t *testing.T, when spec.G, it spec.S) { +var _ = suite("database/connection", func(t *testing.T, when spec.G, it spec.S) { var ( expect 
*require.Assertions server *httptest.Server diff --git a/integration/database_create_fork_test.go b/integration/database_create_fork_test.go index 2a6f649ba..5b26f46ea 100644 --- a/integration/database_create_fork_test.go +++ b/integration/database_create_fork_test.go @@ -44,12 +44,12 @@ var _ = suite("database/create/fork", func(t *testing.T, when spec.G, it spec.S) expect.NoError(err) request := struct { - Name string `json:"name"` - Engine string `json:"engine"` - Version string `json:"version"` - Region string `json:"region"` - Nodes int `json:"num_nodes"` - BackupRestore interface{} `json:"backup_restore"` + Name string `json:"name"` + Engine string `json:"engine"` + Version string `json:"version"` + Region string `json:"region"` + Nodes int `json:"num_nodes"` + BackupRestore any `json:"backup_restore"` }{} err = json.Unmarshal(reqBody, &request) diff --git a/integration/database_create_restore_from_cluster.go b/integration/database_create_restore_from_cluster.go index dc0bad56a..2e3585625 100644 --- a/integration/database_create_restore_from_cluster.go +++ b/integration/database_create_restore_from_cluster.go @@ -44,12 +44,12 @@ var _ = suite("database/create/backup-restore", func(t *testing.T, when spec.G, expect.NoError(err) request := struct { - Name string `json:"name"` - Engine string `json:"engine"` - Version string `json:"version"` - Region string `json:"region"` - Nodes int `json:"num_nodes"` - BackupRestore interface{} `json:"backup_restore"` + Name string `json:"name"` + Engine string `json:"engine"` + Version string `json:"version"` + Region string `json:"region"` + Nodes int `json:"num_nodes"` + BackupRestore any `json:"backup_restore"` }{} err = json.Unmarshal(reqBody, &request) @@ -130,8 +130,8 @@ const ( restoreFromTimestampError = "Error: Invalid format for --restore-from-timestamp. 
Must be in UTC format: 2006-01-02 15:04:05 +0000 UTC" databasesCreateRestoreBackUpOutput = ` Notice: Database created -ID Name Engine Version Number of Nodes Region Status Size URI Created At -some-id new-db-name mysql what-version 100 nyc3 creating biggest mysql://doadmin:secret@aaa-bbb-ccc-111-222-333.db.ondigitalocean.com:25060/defaultdb 2019-01-11 18:37:36 +0000 UTC +ID Name Engine Version Number of Nodes Region Status Size URI Created At Storage (MiB) +some-id new-db-name mysql what-version 100 nyc3 creating biggest mysql://doadmin:secret@aaa-bbb-ccc-111-222-333.db.ondigitalocean.com:25060/defaultdb 2019-01-11 18:37:36 +0000 UTC 100 ` databaseRestoreBackUpCreateRequestBody = `{ "name":"new-db-name", @@ -165,7 +165,8 @@ some-id new-db-name mysql what-version 100 nyc3 "size": "biggest", "tags": [ "production" - ] + ], + "storage_size_mib": 100 } }` ) diff --git a/integration/database_create_test.go b/integration/database_create_test.go index 2e87c3c63..95684ff65 100644 --- a/integration/database_create_test.go +++ b/integration/database_create_test.go @@ -144,14 +144,14 @@ var _ = suite("database/create", func(t *testing.T, when spec.G, it spec.S) { const ( databasesCreateOutput = ` Notice: Database created -ID Name Engine Version Number of Nodes Region Status Size URI Created At -some-id my-database-name mysql what-version 100 nyc3 creating biggest mysql://doadmin:secret@aaa-bbb-ccc-111-222-333.db.ondigitalocean.com:25060/defaultdb 2019-01-11 18:37:36 +0000 UTC +ID Name Engine Version Number of Nodes Region Status Size URI Created At Storage (MiB) +some-id my-database-name mysql what-version 100 nyc3 creating biggest mysql://doadmin:secret@aaa-bbb-ccc-111-222-333.db.ondigitalocean.com:25060/defaultdb 2019-01-11 18:37:36 +0000 UTC 100 ` databasesWaitCreateOutput = ` Notice: Database creation is in progress, waiting for database to be online Notice: Database created -ID Name Engine Version Number of Nodes Region Status Size URI Created At -some-id 
my-database-name mysql what-version 100 nyc3 online biggest mysql://doadmin:secret@aaa-bbb-ccc-111-222-333.db.ondigitalocean.com:25060/defaultdb 2019-01-11 18:37:36 +0000 UTC +ID Name Engine Version Number of Nodes Region Status Size URI Created At Storage (MiB) +some-id my-database-name mysql what-version 100 nyc3 online biggest mysql://doadmin:secret@aaa-bbb-ccc-111-222-333.db.ondigitalocean.com:25060/defaultdb 2019-01-11 18:37:36 +0000 UTC 100 ` databaseCreateResponse = ` { @@ -172,7 +172,8 @@ some-id my-database-name mysql what-version 100 nyc3 "created_at": "2019-01-11T18:37:36Z", "maintenance_window": null, "size": "biggest", - "tags": ["{{.Tags}}"] + "tags": ["{{.Tags}}"], + "storage_size_mib": 100 } }` @@ -195,7 +196,8 @@ some-id my-database-name mysql what-version 100 nyc3 "size": "biggest", "tags": [ "test" - ] + ], + "storage_size_mib": 100 } }` ) diff --git a/integration/database_firewall_add_test.go b/integration/database_firewall_add_test.go index 91c7481fd..910b7959f 100644 --- a/integration/database_firewall_add_test.go +++ b/integration/database_firewall_add_test.go @@ -36,7 +36,7 @@ func (ms *mockServer) Get(t *testing.T, w http.ResponseWriter, r *http.Request) return } - data, err := json.Marshal(map[string]interface{}{ + data, err := json.Marshal(map[string]any{ "rules": ms.rules, }) if err != nil { diff --git a/integration/monitoring_test.go b/integration/monitoring_test.go index b0fe2e83c..790d77633 100644 --- a/integration/monitoring_test.go +++ b/integration/monitoring_test.go @@ -94,7 +94,8 @@ UUID Type Description ` ) -var _ = suite("monitoring/alerts/create", func(t *testing.T, when spec.G, it spec.S) { +// TODO: Re-enable test +var _ = suite.Pend("monitoring/alerts/create", func(t *testing.T, when spec.G, it spec.S) { var ( expect *require.Assertions server *httptest.Server @@ -188,7 +189,8 @@ UUID Type Description ` ) -var _ = suite("monitoring/alerts/update", func(t *testing.T, when spec.G, it spec.S) { +// TODO: Re-enable test +var _ 
= suite.Pend("monitoring/alerts/update", func(t *testing.T, when spec.G, it spec.S) { var ( expect *require.Assertions server *httptest.Server diff --git a/integration/registry_garbagecollection_test.go b/integration/registry_garbagecollection_test.go index 78a88b857..7ff864a05 100644 --- a/integration/registry_garbagecollection_test.go +++ b/integration/registry_garbagecollection_test.go @@ -265,7 +265,7 @@ var _ = suite("registry/garbage-collection", func(t *testing.T, when spec.G, it }) }) -func reifyTemplateStr(t *testing.T, tmplStr string, v interface{}) string { +func reifyTemplateStr(t *testing.T, tmplStr string, v any) string { tmpl, err := template.New("meow").Parse(tmplStr) require.NoError(t, err) diff --git a/internal/apps/builder/cnb.go b/internal/apps/builder/cnb.go index 2f334c514..73ec909b4 100644 --- a/internal/apps/builder/cnb.go +++ b/internal/apps/builder/cnb.go @@ -25,8 +25,8 @@ import ( const ( // CNBBuilderImage represents the local cnb builder. - CNBBuilderImage_Heroku18 = "digitaloceanapps/cnb-local-builder:heroku-18_63b9615" - CNBBuilderImage_Heroku22 = "digitaloceanapps/cnb-local-builder:heroku-22_63b9615" + CNBBuilderImage_Heroku18 = "digitaloceanapps/cnb-local-builder:heroku-18_da24158" + CNBBuilderImage_Heroku22 = "digitaloceanapps/cnb-local-builder:heroku-22_da24158" appVarAllowListKey = "APP_VARS" appVarPrefix = "APP_VAR_" diff --git a/internal/apps/builder/docker_test.go b/internal/apps/builder/docker_test.go index c8893ab11..609e36efb 100644 --- a/internal/apps/builder/docker_test.go +++ b/internal/apps/builder/docker_test.go @@ -342,7 +342,7 @@ type testingT struct { errors []string } -func (t *testingT) Errorf(format string, args ...interface{}) { +func (t *testingT) Errorf(format string, args ...any) { t.mtx.Lock() defer t.mtx.Unlock() t.failed = true diff --git a/pkg/ssh/ssh.go b/pkg/ssh/ssh.go index 5a4db154b..0ed1f3168 100644 --- a/pkg/ssh/ssh.go +++ b/pkg/ssh/ssh.go @@ -24,7 +24,7 @@ import ( ) // Options is the type used to 
specify options passed to the SSH command -type Options map[string]interface{} +type Options map[string]any // Runner runs ssh commands. type Runner struct { diff --git a/pkg/urn/urn.go b/pkg/urn/urn.go index 13ac4c6e3..4b2b08290 100644 --- a/pkg/urn/urn.go +++ b/pkg/urn/urn.go @@ -36,7 +36,7 @@ func ParseURN(s string) (*URN, error) { } // NewURN constructs an *URN from a namespace, resource type, and identifier. -func NewURN(namespace string, collection string, id interface{}) *URN { +func NewURN(namespace string, collection string, id any) *URN { return &URN{ namespace: strings.ToLower(namespace), collection: strings.ToLower(collection), diff --git a/pkg/urn/urn_test.go b/pkg/urn/urn_test.go index 17a11fe64..5e8a4421b 100644 --- a/pkg/urn/urn_test.go +++ b/pkg/urn/urn_test.go @@ -74,7 +74,7 @@ func TestNewURN(t *testing.T) { name string namespace string collection string - identifier interface{} + identifier any expected *URN asString string }{