From 4d92561b75b8799a6a5f5c05f2b26b2dfaac65a7 Mon Sep 17 00:00:00 2001
From: Levi Pearson <levi.pearson@redcanary.com>
Date: Thu, 7 Nov 2024 13:35:43 -0700
Subject: [PATCH] [LE-1204] Initial version of kubernetes-log-collector chart
 (#12)

* Initial version of kubernetes-log-collector chart
* Remove blank line at end of values.yaml
* Ensure template container env var values are all strings
* Add ci/test-values.yaml
* Update README and forcibly quote numeric env values in template
* Fix typo in offload URL
* Give ci cluster a directory it can mount
* Add CLUSTER_ID parameter, clean up network connectivity docs
* Clarify docs for labels value
* Remove email parameter from image pull secret generation docs
---
 charts/kubernetes-log-collector/CHANGELOG.md  |  11 ++
 charts/kubernetes-log-collector/Chart.yaml    |  26 +++
 charts/kubernetes-log-collector/README.md     | 183 ++++++++++++++++++
 .../kubernetes-log-collector/README.md.gotmpl | 167 ++++++++++++++++
 .../ci/test-values.yaml                       |   5 +
 .../templates/NOTES.txt                       |  10 +
 .../templates/_helpers.tpl                    |  51 +++++
 .../templates/daemonset.yaml                  |  88 +++++++++
 charts/kubernetes-log-collector/values.yaml   |  85 ++++++++
 9 files changed, 626 insertions(+)
 create mode 100644 charts/kubernetes-log-collector/CHANGELOG.md
 create mode 100644 charts/kubernetes-log-collector/Chart.yaml
 create mode 100644 charts/kubernetes-log-collector/README.md
 create mode 100644 charts/kubernetes-log-collector/README.md.gotmpl
 create mode 100644 charts/kubernetes-log-collector/ci/test-values.yaml
 create mode 100644 charts/kubernetes-log-collector/templates/NOTES.txt
 create mode 100644 charts/kubernetes-log-collector/templates/_helpers.tpl
 create mode 100644 charts/kubernetes-log-collector/templates/daemonset.yaml
 create mode 100644 charts/kubernetes-log-collector/values.yaml

diff --git a/charts/kubernetes-log-collector/CHANGELOG.md b/charts/kubernetes-log-collector/CHANGELOG.md
new file mode 100644
index 0000000..af60c6b
--- /dev/null
+++ b/charts/kubernetes-log-collector/CHANGELOG.md
@@ -0,0 +1,11 @@
+# Changelog
+
+All notable changes to this chart will be documented in this file.
+
+The chart adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [0.1.0] - 2024-11-22
+
+### Added
+
+- Initial release of the kubernetes-log-collector helm chart
diff --git a/charts/kubernetes-log-collector/Chart.yaml b/charts/kubernetes-log-collector/Chart.yaml
new file mode 100644
index 0000000..a0678a7
--- /dev/null
+++ b/charts/kubernetes-log-collector/Chart.yaml
@@ -0,0 +1,26 @@
+apiVersion: v2
+name: kubernetes-log-collector
+description: A Helm chart for deploying the Red Canary Kubernetes Log Collector
+
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+appVersion: "0.1.0"
+
+keywords:
+  - redcanary
+  - kubernetes
+  - audit
+  - logs
+
+home: https://github.com/redcanaryco/helm-charts/tree/main/charts/kubernetes-log-collector
+
+maintainers:
+  - name: Levi Pearson
+    email: levi.pearson@redcanary.com
+
+icon: https://redcanary.com/wp-content/themes/redcanary/assets/img/apple-icon-152x152.png
diff --git a/charts/kubernetes-log-collector/README.md b/charts/kubernetes-log-collector/README.md
new file mode 100644
index 0000000..f3b5440
--- /dev/null
+++ b/charts/kubernetes-log-collector/README.md
@@ -0,0 +1,183 @@
+# kubernetes-log-collector
+
+A Helm chart for deploying the Red Canary Kubernetes Log Collector
+
+This is a log file forwarder specialized for forwarding Kubernetes audit logs to Red Canary. It is designed to do so reliably and with low overhead while re-shaping the logs (primarily adjusting the number of log objects per offloaded file) for effective and timely processing.
+
+## System requirements for the log forwarder
+The log forwarder consumes very few system resources and requires only read access to the directory the audit logs are written to, plus read/write access to a small amount of persistent storage for checkpointing its offload progress.
+
+It is available in 64-bit x86 and arm variants via a multi-arch container.
+
+## Compatibility with Kubernetes
+This log forwarder requires access to the control plane nodes where the Kubernetes API server component is running. This rules out running it on managed cluster services such as Amazon EKS, Microsoft AKS, or Google GKE. Those services provide other mechanisms for access to the Kubernetes audit logs.
+
+If your cluster does allow access to the control plane nodes, you must also be able to configure the Kubernetes API server to enable audit logging and to write those logs to a host directory on the node. Exact instructions for doing this unfortunately vary between Kubernetes distributions.
+
+The log forwarder is known to work with the following distributions:
+* custom clusters managed with `kubeadm`
+* Rancher `k3s`
+
+## Prerequisites
+* Helm v3.0.0+
+* The container image, tagged and pushed to your private registry
+* Credentials for accessing your private registry
+* A cluster that allows assigning pods to control plane nodes and configuring the API server's audit logging
+* Outbound network connectivity on control plane nodes to specific addresses
+
+### Accessing the container images
+See the official Red Canary customer documentation for the repository name and access credentials.
+
+### Enabling and configuring Kubernetes API audit logging
+The exact methods for enabling and configuring audit logging vary by Kubernetes distribution, but there are some general tasks that need to be performed.
+
+**Configuration for `kube-apiserver`**
+
+This component is a core part of Kubernetes, and it must be running for the system to work. It therefore cannot be started through the normal scheduling process. It is also configured entirely via command line parameters. Distributions differ in how they manage its startup and arguments, so the chart cannot provide any automation here.
+
+If your cluster shows `kube-apiserver` pods running on control plane nodes, your distribution likely uses [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/).
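+
+One quick way to check is to look for pods carrying the `component=kube-apiserver` label that `kubeadm`-style tools apply to the static pod manifests (a sketch; your distribution may label the pods differently):
+
+```console
+kubectl get pods --namespace kube-system -l component=kube-apiserver
+```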
+
+You will need to determine whether those manifests are auto-generated and managed by a tool (as is the case with `kubeadm`) or whether you can safely edit them manually.
+
+If the API server does not run as a pod, the control plane nodes may instead run `kube-apiserver` as a native service via the distribution's service management tool (`systemd`, etc.). You will need to find and edit or override the service specification to change its configuration.
+
+In the case of `k3s`, the `kube-apiserver` component is built into the same service binary as other components. The binary still provides a way to pass equivalents of the command line parameters to the component; see the [`k3s` documentation](https://docs.k3s.io/security/hardening-guide#api-server-audit-configuration) for details.
+
+Regardless of how the service and its configuration are managed, you will need to check the same set of configuration parameters:
+1. `audit-policy-file`: Path to the file that defines the audit policy configuration
+2. `audit-log-path`: When set, requests to the API server are logged to this fully-qualified file path according to policy
+
+You will need to provide (or possibly edit, if one already exists) a policy file and make sure it is located where the `audit-policy-file` parameter is looking for it. If `kube-apiserver` is deployed via a static pod, you will probably want to provide the file and a log directory to the pod via `volumeMount` parameters.
+
+The chart is not concerned with the policy file, but you will need to note the value you set for `audit-log-path` (along with how that corresponds to any `volumeMount` directory mapping) to set the `directories.logs` chart variable (the directory component of the path, mapped to the host filesystem) and the `config.log_file` chart variable (the base name component of the path).
+
+**Audit log policy considerations**
+
+It is unwise to log the full details of every API transaction, so the policy configuration allows you to specify rules by which you can get extra detail in some situations and little to none in others.
+
+TODO: Describe what details are most relevant, link to example config files
+
+### Outbound network connectivity requirements
+* https://o433963.ingest.us.sentry.io
+  + 34.120.195.249
+* https://cwp-ingest.redcanary.io
+  + 3.143.139.141
+  + 3.143.177.78
+  + 52.14.101.187
+
+Note: cwp-ingest.redcanary.io IPs are static.
+
+To use an HTTP proxy, set the following value during your installation:
+```console
+--set config.http_proxy="https://HOST:PORT"
+```
+
+## Installation
+
+### Create a namespace for your installation
+
+```console
+kubectl create namespace <namespace>
+```
+
+### Create a secret to hold your private registry credentials
+
+The image pull secret is used to securely authenticate and authorize container image pulls from your private container registry. *This may not be required in all environments.*
+
+There are two options for doing this.
+
+The first method creates the secret by passing the credential information on the command line. Ensure the namespace parameter matches the one you just created.
+
+```console
+kubectl create secret docker-registry <secret-name> \
+  --docker-server=<registry-server> \
+  --docker-username=<username> \
+  --docker-password=<password> \
+  --namespace=<namespace>
+```
+
+Alternatively, you can create the secret from the JSON description of a logged-in docker session, such as is typically stored at `~/.docker/config.json` after executing a `docker login` command.
+
+```console
+kubectl create secret docker-registry <secret-name> \
+  --from-file=.dockerconfigjson=<path/to/config.json> \
+  --namespace=<namespace>
+```
+
+### Add the Red Canary Helm repository to your system
+
+```console
+helm repo add redcanary https://redcanaryco.github.io/helm-charts
+helm repo update redcanary
+```
+
+### Install the `kubernetes-log-collector` chart
+
+Get the default values file to use as a starting point for configuration.
+
+```console
+helm show values redcanary/kubernetes-log-collector > values.yaml
+```
+
+Edit the values.yaml file with the following goals in mind (a complete example appears after the values table below):
+
+Ensure the `image` parameters match your private registry and `imagePullSecrets` includes the name of the secret you created with your registry's credentials.
+
+Make sure you set `tolerations` and either `nodeSelector` or `affinity` to ensure the DaemonSet will deploy its pods to the nodes in your control plane that are running `kube-apiserver`.
+
+Set the `directories` parameters to appropriate values so that both `kube-apiserver` and `kubernetes-log-collector` will refer to the same host directory for audit log storage and the collector will have a private persistent state directory.
+
+Set the `config` parameters according to the credentials provided when provisioning the service (`service_id` and `outpost_token`) and ensure `log_file` matches the base name of the active log file that `kube-apiserver` writes audit logs to. Set `http_proxy` to your proxy URL if you need one for offloading.
+
+```console
+helm install klc redcanary/kubernetes-log-collector \
+  --namespace=<namespace> \
+  --values=values.yaml
+```
+
+## Removal
+
+Uninstall the DaemonSet with the following command:
+
+```console
+helm uninstall klc --namespace <namespace>
+```
+
+That will stop any running pods, but the namespace and image pull secret will remain until you delete the namespace:
+
+```console
+kubectl delete ns <namespace>
+```
+
+## Values
+
+| Key | Type | Default | Description |
+|-----|------|---------|-------------|
+| affinity | object | `{}` | affinity selects nodes for the DaemonSet pods to run on via an affinity specification |
+| config.cluster_id | string | `"default_cluster"` | cluster_id is an identifier you choose to distinguish this cluster from others using the same service_id |
+| config.http_proxy | string | `nil` | http_proxy is the URL of an HTTP(S) proxy to use, if desired |
+| config.log_file | string | `"audit.log"` | log_file is the base file name the apiserver is configured to use for its audit log |
+| config.offload_after | string | `"60"` | offload_after is the amount of time, in seconds, to wait between offloads if the desired offload_amount has not yet accumulated |
+| config.offload_amount | string | `"50000000"` | offload_amount is the amount of log traffic, in bytes, that should ideally be sent per offload |
+| config.outpost_token | string | `nil` | outpost_token is the access token assigned when provisioning the service |
+| config.service_id | string | `nil` | service_id is the account identifier assigned when provisioning the service |
+| directories.logs | string | `"/var/log/kubernetes"` | logs is the directory in the node's root filesystem where the audit log file can be found |
+| directories.state | string | `"/run/kubernetes-log-collector"` | state is the directory in the node's root filesystem to store persistent checkpoint state |
+| fullnameOverride | string | `nil` | fullnameOverride is a string to fully override the kubernetes-log-collector.fullname template |
+| image.pullPolicy | string | `"IfNotPresent"` | pullPolicy is the policy for fetching images from the repository at runtime |
+| image.repository | string | `"redcanary-audit-log-forwarder-prod.jfrog.io/audit-log-forwarder-prod"` | repository is the image repository to pull from: `<your-registry>`/audit-log-forwarder-prod |
+| image.tag | string | `nil` | tag of the image to deploy. Defaults to the app version of the chart |
+| imagePullSecrets | list | `[]` | imagePullSecrets names Secrets that store credentials that are used for accessing the container registry |
+| labels | object | `{}` | labels provides extra labels for all the resources created by this chart except the Pod spec |
+| nameOverride | string | `nil` | nameOverride is a string to partially override the kubernetes-log-collector.fullname template (will maintain the release name) |
+| nodeSelector | object | `{}` | nodeSelector selects nodes for the DaemonSet pods to run on by node label |
+| podAnnotations | object | `{}` | podAnnotations provides extra annotations for the deployed pod(s) |
+| resources | object | `{}` | resources sets the CPU and memory specifications for the pod(s). |
+| tolerations | list | `[]` | tolerations allow the DaemonSet pods to run on nodes with specific control plane taints |
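+
+## Example configuration
+
+The following `values.yaml` sketch ties the settings above together. It assumes a `kubeadm`-style cluster; the secret name, cluster identifier, and angle-bracketed credentials are illustrative placeholders you must replace with your own values.
+
+```yaml
+imagePullSecrets:
+  - name: redcanary-pull-secret        # the registry secret created earlier (name is illustrative)
+
+tolerations:
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+
+nodeSelector:
+  node-role.kubernetes.io/control-plane: ""
+
+directories:
+  logs: /var/log/kubernetes            # directory component of audit-log-path, on the host
+  state: /run/kubernetes-log-collector
+
+config:
+  cluster_id: production-east          # any identifier distinguishing this cluster
+  service_id: <service-id>             # assigned when the service was provisioned
+  outpost_token: <outpost-token>       # assigned when the service was provisioned
+  log_file: audit.log                  # base name component of audit-log-path
+```
+
+----------------------------------------------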
+
+Autogenerated from chart metadata using [helm-docs](https://github.com/norwoodj/helm-docs). Any changes to README.md will be overwritten.
+
+To regenerate this document, from the root of this chart directory run:
+```shell
+docker run --rm --volume "$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:v1.11.0
+```
diff --git a/charts/kubernetes-log-collector/README.md.gotmpl b/charts/kubernetes-log-collector/README.md.gotmpl
new file mode 100644
index 0000000..1989a29
--- /dev/null
+++ b/charts/kubernetes-log-collector/README.md.gotmpl
@@ -0,0 +1,167 @@
+{{ template "chart.header" . }}
+
+{{ template "chart.description" . }}
+
+This is a log file forwarder specialized for forwarding Kubernetes audit logs to Red Canary. It is designed to do so reliably and with low overhead while re-shaping the logs (primarily adjusting the number of log objects per offloaded file) for effective and timely processing.
+
+## System requirements for the log forwarder
+The log forwarder consumes very few system resources and requires only read access to the directory the audit logs are written to, plus read/write access to a small amount of persistent storage for checkpointing its offload progress.
+
+It is available in 64-bit x86 and arm variants via a multi-arch container.
+
+## Compatibility with Kubernetes
+This log forwarder requires access to the control plane nodes where the Kubernetes API server component is running. This rules out running it on managed cluster services such as Amazon EKS, Microsoft AKS, or Google GKE. Those services provide other mechanisms for access to the Kubernetes audit logs.
+
+If your cluster does allow access to the control plane nodes, you must also be able to configure the Kubernetes API server to enable audit logging and to write those logs to a host directory on the node. Exact instructions for doing this unfortunately vary between Kubernetes distributions.
+
+The log forwarder is known to work with the following distributions:
+* custom clusters managed with `kubeadm`
+* Rancher `k3s`
+
+## Prerequisites
+* Helm v3.0.0+
+* The container image, tagged and pushed to your private registry
+* Credentials for accessing your private registry
+* A cluster that allows assigning pods to control plane nodes and configuring the API server's audit logging
+* Outbound network connectivity on control plane nodes to specific addresses
+
+### Accessing the container images
+See the official Red Canary customer documentation for the repository name and access credentials.
+
+### Enabling and configuring Kubernetes API audit logging
+The exact methods for enabling and configuring audit logging vary by Kubernetes distribution, but there are some general tasks that need to be performed.
+
+**Configuration for `kube-apiserver`**
+
+This component is a core part of Kubernetes, and it must be running for the system to work. It therefore cannot be started through the normal scheduling process. It is also configured entirely via command line parameters. Distributions differ in how they manage its startup and arguments, so the chart cannot provide any automation here.
+
+If your cluster shows `kube-apiserver` pods running on control plane nodes, your distribution likely uses [static pods](https://kubernetes.io/docs/tasks/configure-pod-container/static-pod/).
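+
+One quick way to check is to look for pods carrying the `component=kube-apiserver` label that `kubeadm`-style tools apply to the static pod manifests (a sketch; your distribution may label the pods differently):
+
+```console
+kubectl get pods --namespace kube-system -l component=kube-apiserver
+```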
+
+You will need to determine whether those manifests are auto-generated and managed by a tool (as is the case with `kubeadm`) or whether you can safely edit them manually.
+
+If the API server does not run as a pod, the control plane nodes may instead run `kube-apiserver` as a native service via the distribution's service management tool (`systemd`, etc.). You will need to find and edit or override the service specification to change its configuration.
+
+In the case of `k3s`, the `kube-apiserver` component is built into the same service binary as other components. The binary still provides a way to pass equivalents of the command line parameters to the component; see the [`k3s` documentation](https://docs.k3s.io/security/hardening-guide#api-server-audit-configuration) for details.
+
+Regardless of how the service and its configuration are managed, you will need to check the same set of configuration parameters:
+1. `audit-policy-file`: Path to the file that defines the audit policy configuration
+2. `audit-log-path`: When set, requests to the API server are logged to this fully-qualified file path according to policy
+
+You will need to provide (or possibly edit, if one already exists) a policy file and make sure it is located where the `audit-policy-file` parameter is looking for it. If `kube-apiserver` is deployed via a static pod, you will probably want to provide the file and a log directory to the pod via `volumeMount` parameters.
+
+The chart is not concerned with the policy file, but you will need to note the value you set for `audit-log-path` (along with how that corresponds to any `volumeMount` directory mapping) to set the `directories.logs` chart variable (the directory component of the path, mapped to the host filesystem) and the `config.log_file` chart variable (the base name component of the path).
+
+**Audit log policy considerations**
+
+It is unwise to log the full details of every API transaction, so the policy configuration allows you to specify rules by which you can get extra detail in some situations and little to none in others.
+
+TODO: Describe what details are most relevant, link to example config files
+
+### Outbound network connectivity requirements
+* https://o433963.ingest.us.sentry.io
+  + 34.120.195.249
+* https://cwp-ingest.redcanary.io
+  + 3.143.139.141
+  + 3.143.177.78
+  + 52.14.101.187
+
+Note: cwp-ingest.redcanary.io IPs are static.
+
+To use an HTTP proxy, set the following value during your installation:
+```console
+--set config.http_proxy="https://HOST:PORT"
+```
+
+## Installation
+
+### Create a namespace for your installation
+
+```console
+kubectl create namespace <namespace>
+```
+
+### Create a secret to hold your private registry credentials
+
+The image pull secret is used to securely authenticate and authorize container image pulls from your private container registry. *This may not be required in all environments.*
+
+There are two options for doing this.
+
+The first method creates the secret by passing the credential information on the command line. Ensure the namespace parameter matches the one you just created.
+
+```console
+kubectl create secret docker-registry <secret-name> \
+  --docker-server=<registry-server> \
+  --docker-username=<username> \
+  --docker-password=<password> \
+  --namespace=<namespace>
+```
+
+Alternatively, you can create the secret from the JSON description of a logged-in docker session, such as is typically stored at `~/.docker/config.json` after executing a `docker login` command.
+
+```console
+kubectl create secret docker-registry <secret-name> \
+  --from-file=.dockerconfigjson=<path/to/config.json> \
+  --namespace=<namespace>
+```
+
+### Add the Red Canary Helm repository to your system
+
+```console
+helm repo add redcanary https://redcanaryco.github.io/helm-charts
+helm repo update redcanary
+```
+
+### Install the `kubernetes-log-collector` chart
+
+Get the default values file to use as a starting point for configuration.
+
+```console
+helm show values redcanary/kubernetes-log-collector > values.yaml
+```
+
+Edit the values.yaml file with the following goals in mind (a complete example appears after the values table below):
+
+Ensure the `image` parameters match your private registry and `imagePullSecrets` includes the name of the secret you created with your registry's credentials.
+
+Make sure you set `tolerations` and either `nodeSelector` or `affinity` to ensure the DaemonSet will deploy its pods to the nodes in your control plane that are running `kube-apiserver`.
+
+Set the `directories` parameters to appropriate values so that both `kube-apiserver` and `kubernetes-log-collector` will refer to the same host directory for audit log storage and the collector will have a private persistent state directory.
+
+Set the `config` parameters according to the credentials provided when provisioning the service (`service_id` and `outpost_token`) and ensure `log_file` matches the base name of the active log file that `kube-apiserver` writes audit logs to. Set `http_proxy` to your proxy URL if you need one for offloading.
+
+```console
+helm install klc redcanary/kubernetes-log-collector \
+  --namespace=<namespace> \
+  --values=values.yaml
+```
+
+## Removal
+
+Uninstall the DaemonSet with the following command:
+
+```console
+helm uninstall klc --namespace <namespace>
+```
+
+That will stop any running pods, but the namespace and image pull secret will remain until you delete the namespace:
+
+```console
+kubectl delete ns <namespace>
+```
+
+{{ template "chart.valuesSection" . }}
+
+{{- /*
+## Upgrading
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an incompatible breaking change needing manual actions.

+### To 2.0.0
+This is just an example / placeholder.
+*/}}
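+
+## Example configuration
+
+The following `values.yaml` sketch ties the settings above together. It assumes a `kubeadm`-style cluster; the secret name, cluster identifier, and angle-bracketed credentials are illustrative placeholders you must replace with your own values.
+
+```yaml
+imagePullSecrets:
+  - name: redcanary-pull-secret        # the registry secret created earlier (name is illustrative)
+
+tolerations:
+  - key: node-role.kubernetes.io/control-plane
+    operator: Exists
+    effect: NoSchedule
+
+nodeSelector:
+  node-role.kubernetes.io/control-plane: ""
+
+directories:
+  logs: /var/log/kubernetes            # directory component of audit-log-path, on the host
+  state: /run/kubernetes-log-collector
+
+config:
+  cluster_id: production-east          # any identifier distinguishing this cluster
+  service_id: <service-id>             # assigned when the service was provisioned
+  outpost_token: <outpost-token>       # assigned when the service was provisioned
+  log_file: audit.log                  # base name component of audit-log-path
+```
+
+----------------------------------------------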
+
+Autogenerated from chart metadata using [helm-docs](https://github.com/norwoodj/helm-docs). Any changes to README.md will be overwritten.
+
+To regenerate this document, from the root of this chart directory run:
+```shell
+docker run --rm --volume "$(pwd):/helm-docs" -u "$(id -u)" jnorwood/helm-docs:v1.11.0
+```
diff --git a/charts/kubernetes-log-collector/ci/test-values.yaml b/charts/kubernetes-log-collector/ci/test-values.yaml
new file mode 100644
index 0000000..d4ced3e
--- /dev/null
+++ b/charts/kubernetes-log-collector/ci/test-values.yaml
@@ -0,0 +1,5 @@
+directories:
+  logs: /var/log
+config:
+  service_id: test
+  outpost_token: token
diff --git a/charts/kubernetes-log-collector/templates/NOTES.txt b/charts/kubernetes-log-collector/templates/NOTES.txt
new file mode 100644
index 0000000..47353c2
--- /dev/null
+++ b/charts/kubernetes-log-collector/templates/NOTES.txt
@@ -0,0 +1,10 @@
+Thank you for installing Red Canary's {{ .Chart.Name }} helm chart.
+
+Your release is named {{ .Release.Name }} and has been deployed to the {{ .Release.Namespace }} namespace.
+
+To learn more about the release, try:
+
+  $ helm status {{ .Release.Name }} -n {{ .Release.Namespace }}
+  $ helm get all {{ .Release.Name }} -n {{ .Release.Namespace }}
+
+For additional resources, please visit <>
diff --git a/charts/kubernetes-log-collector/templates/_helpers.tpl b/charts/kubernetes-log-collector/templates/_helpers.tpl
new file mode 100644
index 0000000..e658b49
--- /dev/null
+++ b/charts/kubernetes-log-collector/templates/_helpers.tpl
@@ -0,0 +1,51 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "kubernetes-log-collector.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully-qualified app name.
+We truncate at 63 characters because some Kubernetes name fields are limited to this by the DNS specification.
+If the release name contains the chart name it will be used as the full name.
+*/}}
+{{- define "kubernetes-log-collector.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "kubernetes-log-collector.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "kubernetes-log-collector.labels" -}}
+helm.sh/chart: {{ include "kubernetes-log-collector.chart" . }}
+{{ include "kubernetes-log-collector.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "kubernetes-log-collector.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "kubernetes-log-collector.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
diff --git a/charts/kubernetes-log-collector/templates/daemonset.yaml b/charts/kubernetes-log-collector/templates/daemonset.yaml
new file mode 100644
index 0000000..c869bb4
--- /dev/null
+++ b/charts/kubernetes-log-collector/templates/daemonset.yaml
@@ -0,0 +1,88 @@
+apiVersion: apps/v1
+kind: DaemonSet
+metadata:
+  name: {{ include "kubernetes-log-collector.fullname" . }}
+  labels:
+    {{- include "kubernetes-log-collector.labels" . | nindent 4 }}
+    {{- with .Values.labels }}
+    {{- toYaml . | nindent 4 }}
+    {{- end }}
+spec:
+  selector:
+    matchLabels:
+      {{- include "kubernetes-log-collector.selectorLabels" . | nindent 6 }}
+  template:
+    metadata:
+      labels:
+        app: {{ include "kubernetes-log-collector.fullname" . }}
+        {{- include "kubernetes-log-collector.selectorLabels" . | nindent 8 }}
+      annotations:
+        {{- with .Values.podAnnotations }}
+        {{- toYaml . | nindent 8 }}
+        {{- end }}
+    spec:
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.imagePullSecrets }}
+      imagePullSecrets:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      containers:
+        - image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          name: {{ .Chart.Name }}
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+          env:
+            - name: CLUSTER_ID
+              value: {{ .Values.config.cluster_id | quote }}
+            - name: SERVICE_ID
+              value: {{ .Values.config.service_id | quote }}
+            - name: OUTPOST_TOKEN
+              value: {{ .Values.config.outpost_token | quote }}
+            - name: OUTPOST_HOST
+              value: https://cwp-ingest.redcanary.io/
+            - name: LOG_FILE
+              value: /logs/{{ .Values.config.log_file }}
+            - name: STATE_DIRECTORY
+              value: /state
+            {{- with .Values.config.http_proxy }}
+            - name: HTTP_PROXY
+              value: {{ . | quote }}
+            {{- end }}
+            {{- with .Values.config.offload_amount }}
+            - name: OFFLOAD_AMOUNT
+              value: {{ quote . }}
+            {{- end }}
+            {{- with .Values.config.offload_after }}
+            - name: OFFLOAD_AFTER
+              value: {{ quote . }}
+            {{- end }}
+          securityContext:
+            runAsUser: 0
+          command: ["/opt/redcanary/kubernetes-log-collector"]
+          volumeMounts:
+            - name: logs
+              mountPath: /logs
+              readOnly: true
+            - name: state
+              mountPath: /state
+      volumes:
+        - name: logs
+          hostPath:
+            path: {{ .Values.directories.logs }}
+            type: Directory
+        - name: state
+          hostPath:
+            path: {{ .Values.directories.state }}
+            type: DirectoryOrCreate
diff --git a/charts/kubernetes-log-collector/values.yaml b/charts/kubernetes-log-collector/values.yaml
new file mode 100644
index 0000000..2ccd0fb
--- /dev/null
+++ b/charts/kubernetes-log-collector/values.yaml
@@ -0,0 +1,85 @@
+# Default values for kubernetes-log-collector.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+
+image:
+  # -- repository is the image repository to pull from:
+  # `<your-registry>`/audit-log-forwarder-prod
+  repository: redcanary-audit-log-forwarder-prod.jfrog.io/audit-log-forwarder-prod
+  # -- (string) pullPolicy is the policy for fetching images from the repository at runtime
+  pullPolicy: IfNotPresent
+  # -- (string) tag of the image to deploy. Defaults to the app version of the chart
+  tag:
+
+# -- imagePullSecrets names Secrets that store credentials that are used for accessing the container registry
+imagePullSecrets: []
+  # - name: pullSecret
+
+# -- nameOverride is a string to partially override the kubernetes-log-collector.fullname template (will maintain the release name)
+nameOverride:
+# -- fullnameOverride is a string to fully override the kubernetes-log-collector.fullname template
+fullnameOverride:
+
+# -- podAnnotations provides extra annotations for the deployed pod(s)
+podAnnotations: {}
+# -- labels provides extra labels for all the resources created by this chart except the Pod spec
+labels: {}
+
+# -- resources sets the CPU and memory specifications for the pod(s).
+resources: {}
+  # limits:
+  #   cpu: 1
+  #   memory: "80Mi"
+  # requests:
+  #   cpu: 300m
+  #   memory: "20Mi"
+
+# Control plane nodes, which are the ones we must run on, typically have a taint that must be tolerated.
+# The examples below are typical taints used for control plane nodes; select one or both as appropriate for your cluster.
+
+# -- tolerations allow the DaemonSet pods to run on nodes with specific control plane taints
+tolerations: []
+  # - key: node-role.kubernetes.io/control-plane
+  #   operator: Exists
+  #   effect: NoSchedule
+  # - key: node-role.kubernetes.io/master
+  #   operator: Exists
+  #   effect: NoSchedule
+
+# In addition to tolerating control plane nodes, we need to explicitly select them to run on (rather than worker nodes), which can be done via nodeSelector or affinity.
+
+# The nodeSelector example is the well-known label applied automatically by tools like kubeadm to control plane nodes.
+
+# You may want to use affinity instead if the API server runs in a pod but is not deployed on all control plane nodes. You could then use podAffinity to select nodes already running pods with the label `component=kube-apiserver`.
+# Note that if you use podAffinity, the `namespaces` field is tricky because the scheduler distinguishes between `nil` (meaning "same namespace as this pod") and the empty list (meaning "all namespaces"). Make sure you choose appropriate namespaces and labels for your cluster.
+
+# -- nodeSelector selects nodes for the DaemonSet pods to run on by node label
+nodeSelector: {}
+  # node-role.kubernetes.io/control-plane: ""
+
+# -- affinity selects nodes for the DaemonSet pods to run on via an affinity specification
+affinity: {}
+
+# The directories below are used to define hostPath volumes, so they are relative to the node's root filesystem. The logs directory must be the same one where kube-apiserver has been configured to write its API audit logs. The state directory should be unused by any other process or container; it contains checkpoint state for recovery across pod instantiations.
+
+directories:
+  # -- logs is the directory in the node's root filesystem where the audit log file can be found
+  logs: /var/log/kubernetes
+  # -- state is the directory in the node's root filesystem to store persistent checkpoint state
+  state: /run/kubernetes-log-collector
+
+config:
+  # -- cluster_id is an identifier you choose to distinguish this cluster from others using the same service_id
+  cluster_id: default_cluster
+  # -- service_id is the account identifier assigned when provisioning the service
+  service_id:
+  # -- outpost_token is the access token assigned when provisioning the service
+  outpost_token:
+  # -- offload_amount is the amount of log traffic, in bytes, that should ideally be sent per offload
+  offload_amount: "50000000"
+  # -- offload_after is the amount of time, in seconds, to wait between offloads if the desired offload_amount has not yet accumulated
+  offload_after: "60"
+  # -- http_proxy is the URL of an HTTP(S) proxy to use, if desired
+  http_proxy:
+  # -- log_file is the base file name the apiserver is configured to use for its audit log
+  log_file: audit.log