From 75bf36791caae57f91263fa7ba5db658c4a6d531 Mon Sep 17 00:00:00 2001 From: QuentinBisson Date: Wed, 3 Apr 2024 13:39:32 +0200 Subject: [PATCH] Take over prometheus agent remote write config from PMO. --- go.mod | 6 +- .../templates/deployment.yaml | 2 + .../observability-operator/values.schema.json | 9 + helm/observability-operator/values.yaml | 2 + main.go | 42 ++-- pkg/common/password/manager.go | 21 ++ pkg/common/types.go | 4 + pkg/monitoring/prometheusagent/config.go | 38 ++++ pkg/monitoring/prometheusagent/parsing.go | 36 ++++ pkg/monitoring/prometheusagent/secret.go | 11 + pkg/monitoring/prometheusagent/service.go | 201 ++++++++++++++---- 11 files changed, 314 insertions(+), 58 deletions(-) create mode 100644 pkg/common/password/manager.go create mode 100644 pkg/monitoring/prometheusagent/config.go create mode 100644 pkg/monitoring/prometheusagent/parsing.go create mode 100644 pkg/monitoring/prometheusagent/secret.go diff --git a/go.mod b/go.mod index 64dabf07..7d354e63 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,8 @@ module github.com/giantswarm/observability-operator -go 1.21 +go 1.22 require ( - github.com/go-logr/logr v1.4.1 github.com/onsi/ginkgo/v2 v2.17.1 github.com/onsi/gomega v1.32.0 github.com/opsgenie/opsgenie-go-sdk-v2 v1.2.22 @@ -11,7 +10,7 @@ require ( github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring v0.72.0 github.com/prometheus/client_golang v1.19.0 github.com/prometheus/common v0.51.1 - github.com/sirupsen/logrus v1.9.0 + github.com/sirupsen/logrus v1.9.3 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.29.3 k8s.io/apimachinery v0.29.3 @@ -27,6 +26,7 @@ require ( github.com/emicklei/go-restful/v3 v3.12.0 // indirect github.com/evanphx/json-patch/v5 v5.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/zapr v1.3.0 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.21.0 // indirect diff --git a/helm/observability-operator/templates/deployment.yaml b/helm/observability-operator/templates/deployment.yaml index 1bea8099..e2aec3dc 100644 --- a/helm/observability-operator/templates/deployment.yaml +++ b/helm/observability-operator/templates/deployment.yaml @@ -24,7 +24,9 @@ spec: image: "{{ .Values.image.registry }}/{{ .Values.image.name }}:{{ default .Chart.Version .Values.image.tag }}" args: - --leader-elect + - --management-cluster-base-domain={{ $.Values.managementCluster.baseDomain }} - --management-cluster-customer={{ $.Values.managementCluster.customer }} + - --management-cluster-insecure-ca={{ $.Values.managementCluster.insecureCA }} - --management-cluster-name={{ $.Values.managementCluster.name }} - --management-cluster-pipeline={{ $.Values.managementCluster.pipeline }} - --management-cluster-region={{ $.Values.managementCluster.region }} diff --git a/helm/observability-operator/values.schema.json b/helm/observability-operator/values.schema.json index 4be449fe..fa4f3a8d 100644 --- a/helm/observability-operator/values.schema.json +++ b/helm/observability-operator/values.schema.json @@ -32,9 +32,15 @@ "managementCluster": { "type": "object", "properties": { + "baseDomain": { + "type": "string" + }, "customer": { "type": "string" }, + "insecureCA": { + "type": "boolean" + }, "name": { "type": "string" }, @@ -54,6 +60,9 @@ }, "opsgenieApiKey": { "type": "string" + }, + "prometheusVersion": { + "type": "string" } } }, diff --git a/helm/observability-operator/values.yaml b/helm/observability-operator/values.yaml index 
8e02a133..2d4231e0 100644 --- a/helm/observability-operator/values.yaml +++ b/helm/observability-operator/values.yaml @@ -8,7 +8,9 @@ image: tag: "" managementCluster: + baseDomain: domain customer: customer + insecureCA: false name: name pipeline: pipeline region: region diff --git a/main.go b/main.go index 1a90c946..1d40ecd8 100644 --- a/main.go +++ b/main.go @@ -40,6 +40,7 @@ import ( "github.com/giantswarm/observability-operator/internal/controller" "github.com/giantswarm/observability-operator/pkg/common" "github.com/giantswarm/observability-operator/pkg/common/organization" + "github.com/giantswarm/observability-operator/pkg/common/password" "github.com/giantswarm/observability-operator/pkg/monitoring/heartbeat" "github.com/giantswarm/observability-operator/pkg/monitoring/prometheusagent" //+kubebuilder:scaffold:imports @@ -49,17 +50,19 @@ var ( scheme = runtime.NewScheme() setupLog = ctrl.Log.WithName("setup") - metricsAddr string - enableLeaderElection bool - probeAddr string - secureMetrics bool - enableHTTP2 bool - managementClusterCustomer string - managementClusterName string - managementClusterPipeline string - managementClusterRegion string - monitoringEnabled bool - prometheusVersion string + metricsAddr string + enableLeaderElection bool + probeAddr string + secureMetrics bool + enableHTTP2 bool + managementClusterBaseDomain string + managementClusterCustomer string + managementClusterInsecureCA bool + managementClusterName string + managementClusterPipeline string + managementClusterRegion string + monitoringEnabled bool + prometheusVersion string ) const ( @@ -85,8 +88,12 @@ func main() { "If set the metrics endpoint is served securely") flag.BoolVar(&enableHTTP2, "enable-http2", false, "If set, HTTP/2 will be enabled for the metrics and webhook servers") + flag.StringVar(&managementClusterBaseDomain, "management-cluster-base-domain", "", + "The base domain of the management cluster.") flag.StringVar(&managementClusterCustomer, "management-cluster-customer", "", "The customer of the management cluster.") + flag.BoolVar(&managementClusterInsecureCA, "management-cluster-insecure-ca", false, + "Flag to indicate if the management cluster has an insecure CA that should be trusted") flag.StringVar(&managementClusterName, "management-cluster-name", "", "The name of the management cluster.") flag.StringVar(&managementClusterPipeline, "management-cluster-pipeline", "", @@ -157,10 +164,12 @@ func main() { record.InitFromRecorder(mgr.GetEventRecorderFor("observability-operator")) var managementCluster common.ManagementCluster = common.ManagementCluster{ - Customer: managementClusterCustomer, - Name: managementClusterName, - Pipeline: managementClusterPipeline, - Region: managementClusterRegion, + BaseDomain: managementClusterBaseDomain, + Customer: managementClusterCustomer, + InsecureCA: managementClusterInsecureCA, + Name: managementClusterName, + Pipeline: managementClusterPipeline, + Region: managementClusterRegion, } var opsgenieApiKey = os.Getenv(OpsgenieApiKey) @@ -181,11 +190,12 @@ func main() { prometheusAgentService := prometheusagent.PrometheusAgentService{ Client: mgr.GetClient(), OrganizationRepository: organizationRepository, + PasswordManager: password.SimpleManager{}, ManagementCluster: managementCluster, PrometheusVersion: prometheusVersion, } - if err = (&controller.ClusterMonitoringReconciler{ + if err = (&controller.ClusterMonitoringReconciler{ Client: mgr.GetClient(), ManagementCluster: managementCluster, HeartbeatRepository: heartbeatRepository, diff --git 
a/pkg/common/password/manager.go b/pkg/common/password/manager.go new file mode 100644 index 00000000..80c2726b --- /dev/null +++ b/pkg/common/password/manager.go @@ -0,0 +1,21 @@ +package password + +import ( + "crypto/rand" + "encoding/hex" +) + +type Manager interface { + GeneratePassword(length int) (string, error) +} + +type SimpleManager struct { +} + +func (m SimpleManager) GeneratePassword(length int) (string, error) { + bytes := make([]byte, length) + if _, err := rand.Read(bytes); err != nil { + return "", err + } + return hex.EncodeToString(bytes), nil +} diff --git a/pkg/common/types.go b/pkg/common/types.go index f2b5c382..96fd8a95 100644 --- a/pkg/common/types.go +++ b/pkg/common/types.go @@ -33,8 +33,12 @@ const ( ) type ManagementCluster struct { + // BaseDomain is the base domain of the management cluster. + BaseDomain string // Customer is the customer name of the management cluster. Customer string + // InsecureCA is a flag to indicate if the management cluster has an insecure CA that should be trusted. + InsecureCA bool // Name is the name of the management cluster. Name string // Pipeline is the pipeline name of the management cluster. diff --git a/pkg/monitoring/prometheusagent/config.go b/pkg/monitoring/prometheusagent/config.go new file mode 100644 index 00000000..2605e43c --- /dev/null +++ b/pkg/monitoring/prometheusagent/config.go @@ -0,0 +1,38 @@ +package prometheusagent + +import ( + "context" + "fmt" + "net" + + "github.com/pkg/errors" + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" + + "github.com/giantswarm/observability-operator/pkg/monitoring/mimir/querier" + "github.com/giantswarm/observability-operator/pkg/monitoring/prometheusagent/shards" +) + +func getPrometheusAgentRemoteWriteConfigName(cluster *clusterv1.Cluster) string { + return fmt.Sprintf("%s-remote-write-config", cluster.Name) +} + +func getServicePriority(cluster *clusterv1.Cluster) string { + if servicePriority, ok := cluster.GetLabels()[servicePriorityLabel]; ok && servicePriority != "" { + return servicePriority + } + return defaultServicePriority +} + +// We want to compute the number of shards based on the number of time series in the cluster's TSDB head.
+func getShardsCountForCluster(ctx context.Context, cluster *clusterv1.Cluster, currentShardCount int) (int, error) { + headSeries, err := querier.QueryTSDBHeadSeries(ctx, cluster.Name) + if err != nil { + // If prometheus is not accessible (for instance, because it is not yet running on a new cluster), fall back to the default number of shards. + var dnsError *net.DNSError + if errors.As(err, &dnsError) { + return shards.ComputeShards(currentShardCount, defaultShards), nil + } + return 0, errors.WithStack(err) + } + return shards.ComputeShards(currentShardCount, headSeries), nil +} diff --git a/pkg/monitoring/prometheusagent/parsing.go b/pkg/monitoring/prometheusagent/parsing.go new file mode 100644 index 00000000..38217cdb --- /dev/null +++ b/pkg/monitoring/prometheusagent/parsing.go @@ -0,0 +1,36 @@ +package prometheusagent + +import ( + "gopkg.in/yaml.v2" + + "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" + + "github.com/giantswarm/observability-operator/pkg/monitoring/prometheusagent/remotewrite" +) + +func readCurrentShardsFromConfig(configMap corev1.ConfigMap) (int, error) { + remoteWriteConfig := remotewrite.RemoteWriteConfig{} + err := yaml.Unmarshal([]byte(configMap.Data["values"]), &remoteWriteConfig) + if err != nil { + return 0, errors.WithStack(err) + } + + return remoteWriteConfig.PrometheusAgentConfig.Shards, nil +} + +func readRemoteWritePasswordFromSecret(secret corev1.Secret) (string, error) { + remoteWriteConfig := remotewrite.RemoteWriteConfig{} + err := yaml.Unmarshal(secret.Data["values"], &remoteWriteConfig) + if err != nil { + return "", errors.WithStack(err) + } + + for _, rw := range remoteWriteConfig.PrometheusAgentConfig.RemoteWrite { + if rw.Name == "prometheus-meta-operator" { + return rw.Password, nil + } + } + + return "", errors.New("remote write password not found in secret") +} diff --git a/pkg/monitoring/prometheusagent/secret.go b/pkg/monitoring/prometheusagent/secret.go new file mode 100644 index 00000000..b279bc88 --- /dev/null +++ b/pkg/monitoring/prometheusagent/secret.go @@ -0,0 +1,11 @@ +package prometheusagent + +import ( + "fmt" + + clusterv1 "sigs.k8s.io/cluster-api/api/v1beta1" +) + +func getPrometheusAgentRemoteWriteSecretName(cluster *clusterv1.Cluster) string { + return fmt.Sprintf("%s-remote-write-secret", cluster.Name) +} diff --git a/pkg/monitoring/prometheusagent/service.go b/pkg/monitoring/prometheusagent/service.go index 29d8d18c..8b04a568 100644 --- a/pkg/monitoring/prometheusagent/service.go +++ b/pkg/monitoring/prometheusagent/service.go @@ -3,11 +3,11 @@ package prometheusagent import ( "context" "fmt" - "net" "reflect" "github.com/go-logr/logr" "github.com/pkg/errors" + promv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" "gopkg.in/yaml.v2" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" @@ -19,10 +19,9 @@ import ( "github.com/giantswarm/observability-operator/pkg/common" "github.com/giantswarm/observability-operator/pkg/common/organization" + "github.com/giantswarm/observability-operator/pkg/common/password" "github.com/giantswarm/observability-operator/pkg/monitoring" - "github.com/giantswarm/observability-operator/pkg/monitoring/mimir/querier" "github.com/giantswarm/observability-operator/pkg/monitoring/prometheusagent/remotewrite" - "github.com/giantswarm/observability-operator/pkg/monitoring/prometheusagent/shards" ) const ( @@ -33,11 +32,14 @@ const ( // servicePriorityLabel is the label used to determine the priority of a service.
servicePriorityLabel string = "giantswarm.io/service-priority" + + remoteWriteEndpointTemplateURL = "https://%s/%s/api/v1/write" ) type PrometheusAgentService struct { client.Client organization.OrganizationRepository + PasswordManager password.Manager common.ManagementCluster PrometheusVersion string } @@ -45,8 +47,26 @@ type PrometheusAgentService struct { // ensurePrometheusAgentRemoteWriteConfig ensures that the prometheus remote write config is present in the cluster. func (pas *PrometheusAgentService) ReconcilePrometheusAgentRemoteWriteConfig(ctx context.Context, cluster *clusterv1.Cluster) error { logger := log.FromContext(ctx).WithValues("cluster", cluster.Name) - logger.Info("ensuring prometheus remote write config") + logger.Info("ensuring prometheus agent remote write configuration") + + err := pas.createOrUpdateConfig(ctx, cluster, logger) + if err != nil { + logger.Error(err, "failed to create or update prometheus agent remote write config") + return errors.WithStack(err) + } + + err = pas.createOrUpdateSecret(ctx, cluster, logger) + if err != nil { + logger.Error(err, "failed to create or update prometheus agent remote write secret") + return errors.WithStack(err) + } + logger.Info("ensured prometheus agent remote write configuration") + + return nil +} + +func (pas PrometheusAgentService) createOrUpdateConfig(ctx context.Context, cluster *clusterv1.Cluster, logger logr.Logger) error { objectKey := client.ObjectKey{ Name: getPrometheusAgentRemoteWriteConfigName(cluster), Namespace: cluster.GetNamespace(), @@ -83,35 +103,98 @@ func (pas *PrometheusAgentService) ReconcilePrometheusAgentRemoteWriteConfig(ctx return errors.WithStack(err) } } + return nil +} - logger.Info("ensured prometheus remote write config") +func (pas PrometheusAgentService) createOrUpdateSecret(ctx context.Context, cluster *clusterv1.Cluster, logger logr.Logger) error { + objectKey := client.ObjectKey{ + Name: getPrometheusAgentRemoteWriteSecretName(cluster), + Namespace: cluster.GetNamespace(), + } + + current := &corev1.Secret{} + // Get the current secret if it exists. + err := pas.Client.Get(ctx, objectKey, current) + if apierrors.IsNotFound(err) { + logger.Info("generating password for the prometheus agent") + password, err := pas.PasswordManager.GeneratePassword(32) + if err != nil { + logger.Error(err, "failed to generate the prometheus agent password") + return errors.WithStack(err) + } + logger.Info("generated password for the prometheus agent") + + secret, err := pas.buildRemoteWriteSecret(ctx, cluster, logger, password) + if err != nil { + return errors.WithStack(err) + } + err = pas.Client.Create(ctx, secret) + return errors.WithStack(err) + } else if err != nil { + return errors.WithStack(err) + } + + // As it takes a long time to apply the new password to the agent due to a built-in delay in the app-platform, + // we keep the already generated remote write password. 
+ password, err := readRemoteWritePasswordFromSecret(*current) + if err != nil { + return errors.WithStack(err) + } + + desired, err := pas.buildRemoteWriteSecret(ctx, cluster, logger, password) + if err != nil { + return errors.WithStack(err) + } + if !reflect.DeepEqual(current.Data, desired.Data) { + err = pas.Client.Patch(ctx, current, client.MergeFrom(desired)) + if err != nil { + return errors.WithStack(err) + } + } return nil } func (pas *PrometheusAgentService) DeletePrometheusAgentRemoteWriteConfig(ctx context.Context, cluster *clusterv1.Cluster) error { logger := log.FromContext(ctx).WithValues("cluster", cluster.Name) - logger.Info("deleting prometheus remote write config") + logger.Info("deleting prometheus agent remote write configuration") + err := pas.deleteConfigMap(ctx, cluster) + if err != nil { + logger.Error(err, "failed to delete prometheus agent remote write config") + return errors.WithStack(err) + } + + err = pas.deleteSecret(ctx, cluster) + if err != nil { + logger.Error(err, "failed to delete prometheus agent remote write secret") + return errors.WithStack(err) + } + + logger.Info("deleted prometheus agent remote write configuration") + + return nil +} + +func (pas PrometheusAgentService) deleteConfigMap(ctx context.Context, cluster *clusterv1.Cluster) error { objectKey := client.ObjectKey{ Name: getPrometheusAgentRemoteWriteConfigName(cluster), Namespace: cluster.GetNamespace(), } - current := &corev1.ConfigMap{} // Get the current configmap if it exists. err := pas.Client.Get(ctx, objectKey, current) if apierrors.IsNotFound(err) { - // We ignore cases where the configmap is not found (it it was manually deleted for instance) + // Ignore cases where the configmap is not found (if it was manually deleted, for instance). return nil } else if err != nil { return errors.WithStack(err) } - desired := current.DeepCopy() // Delete the finalizer + desired := current.DeepCopy() controllerutil.RemoveFinalizer(desired, monitoring.MonitoringFinalizer) - err = pas.Client.Patch(ctx, current, client.MergeFrom(desired)) + err = pas.Client.Patch(ctx, desired, client.MergeFrom(current)) if err != nil { return errors.WithStack(err) } @@ -120,9 +203,36 @@ func (pas *PrometheusAgentService) DeletePrometheusAgentRemoteWriteConfig(ctx co if err != nil { return errors.WithStack(err) } + return nil +} + +func (pas PrometheusAgentService) deleteSecret(ctx context.Context, cluster *clusterv1.Cluster) error { + objectKey := client.ObjectKey{ + Name: getPrometheusAgentRemoteWriteSecretName(cluster), + Namespace: cluster.GetNamespace(), + } + current := &corev1.Secret{} + // Get the current secret if it exists. + err := pas.Client.Get(ctx, objectKey, current) + if apierrors.IsNotFound(err) { + // Ignore cases where the secret is not found (if it was manually deleted, for instance). 
+ return nil + } else if err != nil { + return errors.WithStack(err) + } - logger.Info("deleted prometheus remote write config") + // Delete the finalizer + desired := current.DeepCopy() + controllerutil.RemoveFinalizer(desired, monitoring.MonitoringFinalizer) + err = pas.Client.Patch(ctx, current, client.MergeFrom(desired)) + if err != nil { + return errors.WithStack(err) + } + err = pas.Client.Delete(ctx, desired) + if err != nil { + return errors.WithStack(err) + } return nil } @@ -189,37 +299,50 @@ func (pas PrometheusAgentService) buildRemoteWriteConfig(ctx context.Context, cl }, nil } -func getPrometheusAgentRemoteWriteConfigName(cluster *clusterv1.Cluster) string { - return fmt.Sprintf("%s-remote-write-config", cluster.Name) -} - -func readCurrentShardsFromConfig(configMap corev1.ConfigMap) (int, error) { - remoteWriteConfig := remotewrite.RemoteWriteConfig{} - err := yaml.Unmarshal([]byte(configMap.Data["values"]), &remoteWriteConfig) - if err != nil { - return 0, errors.WithStack(err) +func (pas PrometheusAgentService) buildRemoteWriteSecret(ctx context.Context, cluster *clusterv1.Cluster, logger logr.Logger, password string) (*corev1.Secret, error) { + url := fmt.Sprintf(remoteWriteEndpointTemplateURL, pas.ManagementCluster.BaseDomain, cluster.Name) + remoteWriteConfig := remotewrite.RemoteWriteConfig{ + PrometheusAgentConfig: remotewrite.PrometheusAgentConfig{ + RemoteWrite: []remotewrite.RemoteWrite{ + { + RemoteWriteSpec: promv1.RemoteWriteSpec{ + URL: url, + Name: "prometheus-meta-operator", + RemoteTimeout: "60s", + QueueConfig: &promv1.QueueConfig{ + Capacity: 30000, + MaxSamplesPerSend: 150000, + MaxShards: 10, + }, + TLSConfig: &promv1.TLSConfig{ + SafeTLSConfig: promv1.SafeTLSConfig{ + InsecureSkipVerify: pas.ManagementCluster.InsecureCA, + }, + }, + }, + Username: cluster.Name, + Password: password, + }, + }, + }, } - return remoteWriteConfig.PrometheusAgentConfig.Shards, nil -} - -// We want to compute the number of shards based on the number of nodes. -func getShardsCountForCluster(ctx context.Context, cluster *clusterv1.Cluster, currentShardCount int) (int, error) { - headSeries, err := querier.QueryTSDBHeadSeries(ctx, cluster.Name) + marshalledValues, err := yaml.Marshal(remoteWriteConfig) if err != nil { - // If prometheus is not accessible (for instance, not running because this is a new cluster, we check if prometheus is accessible) - var dnsError *net.DNSError - if errors.As(err, &dnsError) { - return shards.ComputeShards(currentShardCount, defaultShardCount), nil - } - return 0, errors.WithStack(err) + return nil, errors.WithStack(err) } - return shards.ComputeShards(currentShardCount, headSeries), nil -} -func getServicePriority(cluster *clusterv1.Cluster) string { - if servicePriority, ok := cluster.GetLabels()[servicePriorityLabel]; ok && servicePriority != "" { - return servicePriority - } - return defaultServicePriority + return &corev1.Secret{ + ObjectMeta: metav1.ObjectMeta{ + Name: getPrometheusAgentRemoteWriteSecretName(cluster), + Namespace: cluster.Namespace, + Finalizers: []string{ + monitoring.MonitoringFinalizer, + }, + }, + Data: map[string][]byte{ + "values": marshalledValues, + }, + Type: "Opaque", + }, nil }
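
The Manager interface in pkg/common/password treats length as the number of random bytes, so the hex-encoded result is twice that long: GeneratePassword(32) produces a 64-character password for the remote write secret. A minimal usage sketch (the main package is only for illustration):

package main

import (
	"fmt"
	"log"

	"github.com/giantswarm/observability-operator/pkg/common/password"
)

func main() {
	// SimpleManager is the implementation wired into PrometheusAgentService in main.go.
	var manager password.Manager = password.SimpleManager{}

	// 32 random bytes, hex-encoded, gives a 64-character string.
	secret, err := manager.GeneratePassword(32)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(secret)) // 64
}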
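getShardsCountForCluster delegates the actual sizing to shards.ComputeShards, which is not part of this patch. The sketch below is only an assumption of how such a function could behave — roughly one shard per fixed budget of head series, with some reluctance to scale down — and the budget, the hysteresis, and the float64 parameter type are all guesses, not the real implementation in pkg/monitoring/prometheusagent/shards:

package shards

import "math"

// assumedSeriesPerShard is a made-up budget of TSDB head series per agent shard;
// the real value used by the operator is not shown in this patch.
const assumedSeriesPerShard = 1_000_000

// ComputeShards derives a shard count from the observed head series, keeping at
// least one shard and avoiding scale-downs for small fluctuations.
func ComputeShards(currentShardCount int, headSeries float64) int {
	desired := int(math.Ceil(headSeries / assumedSeriesPerShard))
	if desired < 1 {
		desired = 1
	}
	// Assumed hysteresis: keep the current count when the desired count is only
	// marginally lower, so the agent does not flap between shard counts.
	if desired < currentShardCount && float64(desired) > float64(currentShardCount)*0.8 {
		return currentShardCount
	}
	return desired
}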
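The remote write URL stored in the secret comes from remoteWriteEndpointTemplateURL, filled with the management cluster base domain (the new --management-cluster-base-domain flag, backed by the managementCluster.baseDomain Helm value) and the workload cluster name. A small sketch with made-up values:

package main

import "fmt"

const remoteWriteEndpointTemplateURL = "https://%s/%s/api/v1/write"

func main() {
	// Hypothetical values for illustration only.
	baseDomain := "installation.example.io"
	clusterName := "demo01"

	url := fmt.Sprintf(remoteWriteEndpointTemplateURL, baseDomain, clusterName)
	fmt.Println(url) // https://installation.example.io/demo01/api/v1/write
}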
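Finalizer removal on the configmap and secret relies on controller-runtime merge patches: client.MergeFrom wraps the base object, and the modified copy is the object passed to Patch, as deleteConfigMap now does. A self-contained sketch of that pattern (the package and function names are placeholders):

package cleanup

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
)

// removeFinalizer drops the given finalizer from a secret. The patch body is the
// difference from the base (current) to the modified copy (desired).
func removeFinalizer(ctx context.Context, c client.Client, current *corev1.Secret, finalizer string) error {
	desired := current.DeepCopy()
	controllerutil.RemoveFinalizer(desired, finalizer)
	return c.Patch(ctx, desired, client.MergeFrom(current))
}

Reversing the two arguments computes the opposite diff (from the modified copy back to the base), which re-asserts the existing state instead of applying the change.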