diff --git a/config/default/exporter_auth_proxy_patch.yaml b/config/default/exporter_auth_proxy_patch.yaml
new file mode 100644
index 0000000..7994618
--- /dev/null
+++ b/config/default/exporter_auth_proxy_patch.yaml
@@ -0,0 +1,36 @@
+# This patch injects a sidecar container that acts as an HTTP proxy for the
+# exporter. It performs RBAC authorization against the Kubernetes API using SubjectAccessReviews.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: alerts-exporter
+spec:
+  template:
+    spec:
+      containers:
+      - name: kube-rbac-proxy
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - "ALL"
+        image: gcr.io/kubebuilder/kube-rbac-proxy:v0.13.1
+        args:
+        - "--secure-listen-address=0.0.0.0:8443"
+        - "--upstream=http://127.0.0.1:8080/"
+        - "--logtostderr=true"
+        - "--v=0"
+        ports:
+        - containerPort: 8443
+          protocol: TCP
+          name: https
+        resources:
+          limits:
+            cpu: 500m
+            memory: 128Mi
+          requests:
+            cpu: 10m
+            memory: 64Mi
+      - name: exporter
+        args:
+        - "--listen-addr=127.0.0.1:8080"
diff --git a/config/default/exporter_config_patch.yaml b/config/default/exporter_config_patch.yaml
new file mode 100644
index 0000000..e3c0df3
--- /dev/null
+++ b/config/default/exporter_config_patch.yaml
@@ -0,0 +1,9 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: alerts-exporter
+spec:
+  template:
+    spec:
+      containers:
+      - name: exporter
diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml
new file mode 100644
index 0000000..f66ce6a
--- /dev/null
+++ b/config/default/kustomization.yaml
@@ -0,0 +1,11 @@
+# Adds namespace to all resources.
+namespace: alerts-exporter
+
+bases:
+- ../rbac
+- ../exporter
+- ../prometheus
+
+patchesStrategicMerge:
+# Protect the /metrics endpoint by putting it behind auth.
+- exporter_auth_proxy_patch.yaml
diff --git a/config/deployment/deployment.yaml b/config/deployment/deployment.yaml
deleted file mode 100644
index f523d40..0000000
--- a/config/deployment/deployment.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: alerts-collector
-spec:
-  selector:
-    matchLabels:
-      app: alerts-collector
-  template:
-    metadata:
-      labels:
-        app: alerts-collector
-    spec:
-      containers:
-      - name: collector
-        image: local.dev/alerts-collector:latest
-        resources:
-          resources:
-            memory: "64Mi"
-            cpu: "50m"
-          limits:
-            memory: "128Mi"
-            cpu: "500m"
-        ports:
-        - containerPort: 8080
diff --git a/config/exporter/exporter.yaml b/config/exporter/exporter.yaml
new file mode 100644
index 0000000..c3528c9
--- /dev/null
+++ b/config/exporter/exporter.yaml
@@ -0,0 +1,58 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: alerts-exporter
+    app.kubernetes.io/name: namespace
+    app.kubernetes.io/instance: exporter
+    app.kubernetes.io/component: exporter
+    app.kubernetes.io/created-by: alerts-exporter
+    app.kubernetes.io/part-of: alerts-exporter
+    app.kubernetes.io/managed-by: kustomize
+  name: alerts-exporter
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: alerts-exporter
+  labels:
+    control-plane: alerts-exporter
+    app.kubernetes.io/name: deployment
+    app.kubernetes.io/instance: exporter
+    app.kubernetes.io/component: exporter
+    app.kubernetes.io/created-by: alerts-exporter
+    app.kubernetes.io/part-of: alerts-exporter
+    app.kubernetes.io/managed-by: kustomize
+spec:
+  selector:
+    matchLabels:
+      control-plane: alerts-exporter
+  replicas: 1
+  template:
+    metadata:
+      annotations:
+        kubectl.kubernetes.io/default-container: exporter
+      labels:
+        control-plane: alerts-exporter
+    spec:
+      securityContext:
+        runAsNonRoot: true
+        seccompProfile:
+          type: RuntimeDefault
+      containers:
+      - name: exporter
+        image: ghcr.io/appuio/alerts_exporter:latest
+        securityContext:
+          allowPrivilegeEscalation: false
+          capabilities:
+            drop:
+            - "ALL"
+        resources:
+          limits:
+            cpu: 100m
+            memory: 128Mi
+          requests:
+            cpu: 10m
+            memory: 64Mi
+      serviceAccountName: alerts-exporter
+      terminationGracePeriodSeconds: 10
diff --git a/config/exporter/kustomization.yaml b/config/exporter/kustomization.yaml
new file mode 100644
index 0000000..8a25efa
--- /dev/null
+++ b/config/exporter/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- exporter.yaml
diff --git a/config/openshift4/exporter_openshift_patch.yaml b/config/openshift4/exporter_openshift_patch.yaml
new file mode 100644
index 0000000..b24d59c
--- /dev/null
+++ b/config/openshift4/exporter_openshift_patch.yaml
@@ -0,0 +1,27 @@
+# This patch configures the exporter to query the OpenShift Alertmanager over TLS,
+# authenticating with the Kubernetes service account bearer token.
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: alerts-exporter
+spec:
+  template:
+    spec:
+      containers:
+      - name: exporter
+        args:
+        - --listen-addr=127.0.0.1:8080
+        - --tls
+        - --host=alertmanager-operated.openshift-monitoring.svc.cluster.local:9095
+        - --tls-server-name=alertmanager-main.openshift-monitoring.svc.cluster.local
+        - --k8s-bearer-token-auth
+        - --tls-ca-cert=/etc/ssl/certs/serving-certs/service-ca.crt
+        volumeMounts:
+        - mountPath: /etc/ssl/certs/serving-certs/
+          name: ca-bundle
+          readOnly: true
+      volumes:
+      - configMap:
+          defaultMode: 288
+          name: openshift-service-ca.crt
+        name: ca-bundle
diff --git a/config/openshift4/exporter_role_patch.yaml b/config/openshift4/exporter_role_patch.yaml
new file mode 100644
index 0000000..5098a73
--- /dev/null
+++ b/config/openshift4/exporter_role_patch.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: exporter-role
+rules:
+- apiGroups:
+  - monitoring.coreos.com
+  resources:
+  - alertmanagers
+  verbs:
+  - patch
diff --git a/config/openshift4/kustomization.yaml b/config/openshift4/kustomization.yaml
new file mode 100644
index 0000000..9103039
--- /dev/null
+++ b/config/openshift4/kustomization.yaml
@@ -0,0 +1,7 @@
+bases:
+- ../default
+
+patchesStrategicMerge:
+# Point the exporter at the OpenShift Alertmanager and grant the required RBAC.
+- exporter_openshift_patch.yaml
+- exporter_role_patch.yaml
\ No newline at end of file
diff --git a/config/prometheus/kustomization.yaml b/config/prometheus/kustomization.yaml
new file mode 100644
index 0000000..ed13716
--- /dev/null
+++ b/config/prometheus/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- monitor.yaml
diff --git a/config/prometheus/monitor.yaml b/config/prometheus/monitor.yaml
new file mode 100644
index 0000000..318a334
--- /dev/null
+++ b/config/prometheus/monitor.yaml
@@ -0,0 +1,30 @@
+
+# Prometheus Monitor Service (Metrics)
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  labels:
+    control-plane: alerts-exporter
+    app.kubernetes.io/name: servicemonitor
+    app.kubernetes.io/instance: alerts-exporter-metrics-monitor
+    app.kubernetes.io/component: metrics
+    app.kubernetes.io/created-by: alerts-exporter
+    app.kubernetes.io/part-of: alerts-exporter
+    app.kubernetes.io/managed-by: kustomize
+  name: alerts-exporter-metrics-monitor
+spec:
+  endpoints:
+  - path: /metrics
+    port: https
+    scheme: https
+    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
+    tlsConfig:
+      insecureSkipVerify: true
+    metricRelabelings:
+    - action: keep
+      regex: alerts_exporter_.+
+      sourceLabels:
+      - __name__
+  selector:
+    matchLabels:
+      control-plane: alerts-exporter
diff --git a/config/rbac/auth_proxy_client_clusterrole.yaml b/config/rbac/auth_proxy_client_clusterrole.yaml
new file mode 100644
index 0000000..d74b05f
--- /dev/null
+++ b/config/rbac/auth_proxy_client_clusterrole.yaml
@@ -0,0 +1,16 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: clusterrole
+    app.kubernetes.io/instance: metrics-reader
+    app.kubernetes.io/component: kube-rbac-proxy
+    app.kubernetes.io/created-by: alerts-exporter
+    app.kubernetes.io/part-of: alerts-exporter
+    app.kubernetes.io/managed-by: kustomize
+  name: metrics-reader
+rules:
+- nonResourceURLs:
+  - "/metrics"
+  verbs:
+  - get
diff --git a/config/rbac/auth_proxy_role.yaml b/config/rbac/auth_proxy_role.yaml
new file mode 100644
index 0000000..1476552
--- /dev/null
+++ b/config/rbac/auth_proxy_role.yaml
@@ -0,0 +1,24 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/name: clusterrole
+    app.kubernetes.io/instance: proxy-role
+    app.kubernetes.io/component: kube-rbac-proxy
+    app.kubernetes.io/created-by: alerts-exporter
+    app.kubernetes.io/part-of: alerts-exporter
+    app.kubernetes.io/managed-by: kustomize
+  name: proxy-role
+rules:
+- apiGroups:
+  - authentication.k8s.io
+  resources:
+  - tokenreviews
+  verbs:
+  - create
+- apiGroups:
+  - authorization.k8s.io
+  resources:
+  - subjectaccessreviews
+  verbs:
+  - create
diff --git a/config/rbac/auth_proxy_role_binding.yaml b/config/rbac/auth_proxy_role_binding.yaml
new file mode 100644
index 0000000..8f9c6cf
--- /dev/null
+++ b/config/rbac/auth_proxy_role_binding.yaml
@@ -0,0 +1,18 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/name: clusterrolebinding
+    app.kubernetes.io/instance: proxy-rolebinding
+    app.kubernetes.io/component: kube-rbac-proxy
+    app.kubernetes.io/created-by: alerts-exporter
+    app.kubernetes.io/part-of: alerts-exporter
+    app.kubernetes.io/managed-by: kustomize
+  name: proxy-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: proxy-role
+subjects:
+- kind: ServiceAccount
+  name: alerts-exporter
diff --git a/config/rbac/auth_proxy_service.yaml b/config/rbac/auth_proxy_service.yaml
new file mode 100644
index 0000000..8ebc009
--- /dev/null
+++ b/config/rbac/auth_proxy_service.yaml
@@ -0,0 +1,20 @@
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    control-plane: alerts-exporter
+    app.kubernetes.io/name: service
+    app.kubernetes.io/instance: alerts-exporter-metrics-service
+    app.kubernetes.io/component: kube-rbac-proxy
+    app.kubernetes.io/created-by: alerts-exporter
+    app.kubernetes.io/part-of: alerts-exporter
+    app.kubernetes.io/managed-by: kustomize
+  name: alerts-exporter-metrics-service
+spec:
+  ports:
+  - name: https
+    port: 8443
+    protocol: TCP
+    targetPort: https
+  selector:
+    control-plane: alerts-exporter
diff --git a/config/rbac/kustomization.yaml b/config/rbac/kustomization.yaml
new file mode 100644
index 0000000..31b3f7c
--- /dev/null
+++ b/config/rbac/kustomization.yaml
@@ -0,0 +1,9 @@
+resources:
+- service_account.yaml
+- role.yaml
+- role_binding.yaml
+
+- auth_proxy_service.yaml
+- auth_proxy_role.yaml
+- auth_proxy_role_binding.yaml
+- auth_proxy_client_clusterrole.yaml
diff --git a/config/rbac/role.yaml b/config/rbac/role.yaml
new file mode 100644
index 0000000..2787f9d
--- /dev/null
+++ b/config/rbac/role.yaml
@@ -0,0 +1,10 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  name: exporter-role
+rules:
+- nonResourceURLs:
+  - /api/v2/alerts
+  verbs:
+  - get
diff --git a/config/rbac/role_binding.yaml b/config/rbac/role_binding.yaml
new file mode 100644
index 0000000..493a547
--- /dev/null
+++ b/config/rbac/role_binding.yaml
@@ -0,0 +1,18 @@
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  labels:
+    app.kubernetes.io/name: clusterrolebinding
+    app.kubernetes.io/instance: exporter-rolebinding
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: alerts-exporter
+    app.kubernetes.io/part-of: alerts-exporter
+    app.kubernetes.io/managed-by: kustomize
+  name: exporter-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: exporter-role
+subjects:
+- kind: ServiceAccount
+  name: alerts-exporter
diff --git a/config/rbac/service_account.yaml b/config/rbac/service_account.yaml
new file mode 100644
index 0000000..e56b026
--- /dev/null
+++ b/config/rbac/service_account.yaml
@@ -0,0 +1,11 @@
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app.kubernetes.io/name: serviceaccount
+    app.kubernetes.io/instance: alerts-exporter
+    app.kubernetes.io/component: rbac
+    app.kubernetes.io/created-by: alerts-exporter
+    app.kubernetes.io/part-of: alerts-exporter
+    app.kubernetes.io/managed-by: kustomize
+  name: alerts-exporter
diff --git a/internal/saauth/saauth.go b/internal/saauth/saauth.go
new file mode 100644
index 0000000..fab81f9
--- /dev/null
+++ b/internal/saauth/saauth.go
@@ -0,0 +1,93 @@
+package saauth
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewServiceAccountAuthInfoWriter creates a new ServiceAccountAuthInfoWriter.
+// ServiceAccountAuthInfoWriter implements Kubernetes service account authentication.
+// It reads the token from the given file and refreshes it every refreshInterval.
+// If refreshInterval is 0, it defaults to 5 minutes.
+// If saFile is empty, it defaults to /var/run/secrets/kubernetes.io/serviceaccount/token.
+// An error is returned if the initial token read fails. Further read failures do not cause an error.
+func NewServiceAccountAuthInfoWriter(saFile string, refreshInterval time.Duration) (*ServiceAccountAuthInfoWriter, error) {
+	if saFile == "" {
+		saFile = "/var/run/secrets/kubernetes.io/serviceaccount/token"
+	}
+	if refreshInterval == 0 {
+		refreshInterval = 5 * time.Minute
+	}
+
+	w := &ServiceAccountAuthInfoWriter{
+		ticker: time.NewTicker(refreshInterval),
+		saFile: saFile,
+	}
+
+	t, err := w.readTokenFromFile()
+	if err != nil {
+		return nil, fmt.Errorf("failed to read token from file: %w", err)
+	}
+	w.storeToken(t)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	w.cancel = cancel
+
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case <-w.ticker.C:
+				t, err := w.readTokenFromFile()
+				if err != nil {
+					log.Printf("failed to read token from file: %v", err)
+					continue
+				}
+				w.storeToken(t)
+			}
+		}
+	}()
+
+	return w, nil
+}
+
+// ServiceAccountAuthInfoWriter implements Kubernetes service account authentication.
+type ServiceAccountAuthInfoWriter struct {
+	saFile string
+	token  atomic.Value
+	ticker *time.Ticker
+	cancel context.CancelFunc
+}
+
+// AuthenticateRequest implements the runtime.ClientAuthInfoWriter interface.
+// It sets the Authorization header to the current token.
+func (s *ServiceAccountAuthInfoWriter) AuthenticateRequest(r runtime.ClientRequest, _ strfmt.Registry) error {
+	return r.SetHeaderParam(runtime.HeaderAuthorization, "Bearer "+s.loadToken())
+}
+
+// Stop stops the token refresh
+func (s *ServiceAccountAuthInfoWriter) Stop() {
+	s.cancel()
+	s.ticker.Stop()
+}
+
+func (s *ServiceAccountAuthInfoWriter) storeToken(t string) {
+	s.token.Store(t)
+}
+
+func (s *ServiceAccountAuthInfoWriter) loadToken() string {
+	return s.token.Load().(string)
+}
+
+func (s *ServiceAccountAuthInfoWriter) readTokenFromFile() (string, error) {
+	t, err := os.ReadFile(s.saFile)
+	return string(t), err
+}
diff --git a/internal/saauth/saauth_test.go b/internal/saauth/saauth_test.go
new file mode 100644
index 0000000..b3fb9ad
--- /dev/null
+++ b/internal/saauth/saauth_test.go
@@ -0,0 +1,40 @@
+package saauth_test
+
+import (
+	"os"
+	"testing"
+	"time"
+
+	"github.com/appuio/alerts_exporter/internal/saauth"
+	"github.com/go-openapi/runtime"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func Test_ServiceAccountAuthInfoWriter_AuthenticateRequest(t *testing.T) {
+	tokenFile := t.TempDir() + "/token"
+
+	require.NoError(t, os.WriteFile(tokenFile, []byte("token"), 0644))
+
+	subject, err := saauth.NewServiceAccountAuthInfoWriter(tokenFile, time.Millisecond)
+	require.NoError(t, err)
+	defer subject.Stop()
+
+	r := new(runtime.TestClientRequest)
+	require.NoError(t, subject.AuthenticateRequest(r, nil))
+	require.Equal(t, "Bearer token", r.GetHeaderParams().Get("Authorization"))
+
+	require.NoError(t, os.WriteFile(tokenFile, []byte("new-token"), 0644))
+	require.EventuallyWithT(t, func(t *assert.CollectT) {
+		r := new(runtime.TestClientRequest)
+		require.NoError(t, subject.AuthenticateRequest(r, nil))
+		require.Equal(t, "Bearer new-token", r.GetHeaderParams().Get("Authorization"))
+	}, time.Second, time.Millisecond)
+}
+
+func Test_NewServiceAccountAuthInfoWriter_TokenReadErr(t *testing.T) {
+	tokenFile := t.TempDir() + "/token"
+
+	_, err := saauth.NewServiceAccountAuthInfoWriter(tokenFile, time.Millisecond)
+	require.ErrorIs(t, err, os.ErrNotExist)
+}
diff --git a/main.go b/main.go
index f615931..9657b8c 100644
--- a/main.go
+++ b/main.go
@@ -7,12 +7,15 @@ import (
 	"net/http"
 
 	alertscollector "github.com/appuio/alerts_exporter/internal/alerts_collector"
+	"github.com/appuio/alerts_exporter/internal/saauth"
 	openapiclient "github.com/go-openapi/runtime/client"
 	alertmanagerclient "github.com/prometheus/alertmanager/api/v2/client"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
 )
 
+var listenAddr string
+
 var host string
 var withInhibited, withSilenced, withUnprocessed, withActive bool
 var filters stringSliceFlag
@@ -21,8 +24,11 @@
 var tlsCert, tlsCertKey, tlsCaCert, tlsServerName string
 var tlsInsecure bool
 var useTLS bool
 var bearerToken string
+var k8sBearerTokenAuth bool
 
 func main() {
+	flag.StringVar(&listenAddr, "listen-addr", ":8080", "The addr to listen on")
+
 	flag.StringVar(&host, "host", "localhost:9093", "The host of the Alertmanager")
 	flag.BoolVar(&useTLS, "tls", false, "Use TLS when connecting to Alertmanager")
@@ -33,6 +39,7 @@
 	flag.BoolVar(&tlsInsecure, "insecure", false, "Disable TLS host verification")
 
 	flag.StringVar(&bearerToken, "bearer-token", "", "Bearer token to use for authentication")
+	flag.BoolVar(&k8sBearerTokenAuth, "k8s-bearer-token-auth", false, "Use Kubernetes service account bearer token for authentication")
 
"with-active", true, "Query for active alerts") flag.BoolVar(&withInhibited, "with-inhibited", true, "Query for inhibited alerts") @@ -67,6 +74,14 @@ func main() { if bearerToken != "" { rt.DefaultAuthentication = openapiclient.BearerToken(bearerToken) } + if k8sBearerTokenAuth { + sa, err := saauth.NewServiceAccountAuthInfoWriter("", 0) + if err != nil { + log.Fatal(err) + } + defer sa.Stop() + rt.DefaultAuthentication = sa + } ac := alertmanagerclient.New(rt, nil) @@ -85,8 +100,8 @@ func main() { // Expose metrics and custom registry via an HTTP server // using the HandleFor function. "/metrics" is the usual endpoint for that. http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) - log.Println("Listening on `:8080/metrics`") - log.Fatal(http.ListenAndServe(":8080", nil)) + log.Printf("Listening on `%s`", listenAddr) + log.Fatal(http.ListenAndServe(listenAddr, nil)) } type stringSliceFlag []string