From 10e5df2dad330fbf3fa0fc215d8d35bd87fff3c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Fernandes?= Date: Mon, 19 Oct 2020 10:25:49 -0300 Subject: [PATCH 1/3] upgrade de versoes dos subcharts --- CHANGELOG.md | 18 +++++ DEVELOP.md | 56 +++++++++++++++ README.md | 14 ++-- charts/vkpr/Chart.yaml | 26 +++---- .../cert-manager.crds.yaml | 0 .../monitoring.coreos.com_alertmanagers.yaml | 0 .../monitoring.coreos.com_podmonitors.yaml | 0 .../monitoring.coreos.com_prometheuses.yaml | 0 ...monitoring.coreos.com_prometheusrules.yaml | 0 ...monitoring.coreos.com_servicemonitors.yaml | 0 .../monitoring.coreos.com_thanosrulers.yaml | 0 charts/vkpr/values.yaml | 2 +- examples/values-local.yaml | 68 +++++++++++++++++-- 13 files changed, 156 insertions(+), 28 deletions(-) create mode 100644 CHANGELOG.md create mode 100644 DEVELOP.md rename charts/vkpr/{crds => crds-legacy}/cert-manager.crds.yaml (100%) rename charts/vkpr/{crds => crds-legacy}/monitoring.coreos.com_alertmanagers.yaml (100%) rename charts/vkpr/{crds => crds-legacy}/monitoring.coreos.com_podmonitors.yaml (100%) rename charts/vkpr/{crds => crds-legacy}/monitoring.coreos.com_prometheuses.yaml (100%) rename charts/vkpr/{crds => crds-legacy}/monitoring.coreos.com_prometheusrules.yaml (100%) rename charts/vkpr/{crds => crds-legacy}/monitoring.coreos.com_servicemonitors.yaml (100%) rename charts/vkpr/{crds => crds-legacy}/monitoring.coreos.com_thanosrulers.yaml (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..986c3dee --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,18 @@ +# Release Notes + +## Version 0.8.0 - 2020-10-15 + +Upgrades: + +- cert-manager: 1.0.3 +- external-dns: 3.4.6 +- nginx-ingress: REMOVED +- ingress-nginx: 3.7.1 +- prometheus-operator: REMOVED +- kube-prometheus-stack: 10.1.0 +- loki-stack: 0.41.2 +- vault: 0.7.0 + +## Version 0.7.2 and before + +Crazy times, no record. Sorry fot that. diff --git a/DEVELOP.md b/DEVELOP.md new file mode 100644 index 00000000..b6ed789e --- /dev/null +++ b/DEVELOP.md @@ -0,0 +1,56 @@ +# Development + +Este documento explica como montar um ambiente para desenvolvimento do chart do vtg-ipaas. + +## Pré-requisitos + +### Arquivo /etc/hosts + +Insira a linha abaixo no arquivo /etc/hosts da estação de desenvolvimento: + +``` +127.0.0.1 whoami.localdomain +``` + +### Ferramentas + +Instale localmente as seguintes ferramentas: + +- k3d +- helm +- kubectl + +### Cluster k3d local + +Crie um cluster k3d local para uso durante o desenvolvimento: + +```sh +k3d create -n vkpr-local \ + --publish 8080:32080 \ + --server-arg "--no-deploy=traefik" +``` + +Os parâmetros acima desligam o Trefik (default do k3d), pois o Kong será o Ingress Controller. +Após a criação do cluster ajuste o KUBECONFIG: + +```sh +export KUBECONFIG="$(k3d get-kubeconfig --name='vkpr-local')" +kubectl cluster-info +``` + +## Local VKPR deployment + +### Get chart dependencies + +```sh +cd charts/vkpr +helm dependency update +cd ../.. 
+``` + +### Helm upgrade/install + +```sh +helm upgrade -i vkpr -f ./examples/values-local.yaml ./charts/vkpr +``` + diff --git a/README.md b/README.md index 92690463..5ac68637 100644 --- a/README.md +++ b/README.md @@ -80,15 +80,15 @@ The **Backup Stack** is dedicated to backup and restore tools in order to migrat ## Charts version -| Charts | VKPR 0.7.2 | +| Charts | VKPR 0.8.0 | |------------------------------------------------------------------------------------------------|------------| -| [cert-manager](https://charts.vertigo.com.br/docs/stacks#cert-manager) | `1.0.1` | -| [ExternalDNS](https://charts.vertigo.com.br/docs/stacks#externaldns) | `3.2.2` | -| [Loki](https://charts.vertigo.com.br/docs/stacks#loki) | `0.37.0` | +| [cert-manager](https://charts.vertigo.com.br/docs/stacks#cert-manager) | `1.0.3` | +| [ExternalDNS](https://charts.vertigo.com.br/docs/stacks#externaldns) | `3.4.6` | +| [Loki](https://charts.vertigo.com.br/docs/stacks#loki) | `0.41.2` | | [Keycloak](https://charts.vertigo.com.br/docs/stacks#keycloak) | `8.2.2` | -| [NGINX Ingress Controller](https://charts.vertigo.com.br/docs/stacks#nginx-ingress-controller) | `1.34.3` | -| [Prometheus Operator](https://charts.vertigo.com.br/docs/stacks#prometheus-operator) | `8.12.3` | -| [Vault](https://charts.vertigo.com.br/docs/stacks#vault) | `0.5.0` | +| [NGINX Ingress Controller](https://charts.vertigo.com.br/docs/stacks#nginx-ingress-controller) | `3.7.1` | +| [Kube Prometheus Stack](https://charts.vertigo.com.br/docs/stacks#prometheus-operator) | `10.1.0` | +| [Vault](https://charts.vertigo.com.br/docs/stacks#vault) | `0.7.0` | | [Velero](https://charts.vertigo.com.br/docs/stacks#velero) | `2.7.4` | ## Requisites diff --git a/charts/vkpr/Chart.yaml b/charts/vkpr/Chart.yaml index fc04eea0..99c9831f 100644 --- a/charts/vkpr/Chart.yaml +++ b/charts/vkpr/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 name: vkpr -version: 0.7.2 +version: 0.8.0 appVersion: v1.4.0 home: https://github.com/vertigobr/vkpr icon: https://vertigo.com.br/wp-content/uploads/favicon.png @@ -17,18 +17,18 @@ dependencies: ### INGRESS STACK ### - name: external-dns repository: https://charts.bitnami.com/bitnami - version: 3.2.2 + version: 3.4.6 condition: external-dns.enabled - - name: nginx-ingress - repository: https://kubernetes-charts.storage.googleapis.com - version: 1.34.3 - condition: nginx-ingress.enabled + - name: ingress-nginx + repository: https://kubernetes.github.io/ingress-nginx + version: 3.7.1 + condition: ingress-nginx.enabled ### LOGGING STACK ### - name: loki-stack repository: https://grafana.github.io/loki/charts - version: 0.37.0 + version: 0.41.2 condition: loki-stack.enabled # # LOGGING STACK @@ -68,10 +68,10 @@ dependencies: # condition: fluent-bit.enabled ### MONITORING STACK ### - - name: prometheus-operator - repository: https://kubernetes-charts.storage.googleapis.com - version: 8.12.3 - condition: prometheus-operator.enabled + - name: kube-prometheus-stack + repository: https://prometheus-community.github.io/helm-charts + version: 10.1.0 + condition: kube-prometheus-stack.enabled # - name: metrics-server # repository: https://kubernetes-charts.storage.googleapis.com @@ -91,7 +91,7 @@ dependencies: ### SECURITY STACK ### - name: cert-manager repository: https://charts.jetstack.io - version: v1.0.1 + version: v1.0.3 condition: cert-manager.enabled - name: keycloak @@ -101,7 +101,7 @@ dependencies: - name: vault repository: https://helm.releases.hashicorp.com - version: 0.5.0 + version: 0.7.0 condition: vault.enabled ### BACKUP STACK 
### diff --git a/charts/vkpr/crds/cert-manager.crds.yaml b/charts/vkpr/crds-legacy/cert-manager.crds.yaml similarity index 100% rename from charts/vkpr/crds/cert-manager.crds.yaml rename to charts/vkpr/crds-legacy/cert-manager.crds.yaml diff --git a/charts/vkpr/crds/monitoring.coreos.com_alertmanagers.yaml b/charts/vkpr/crds-legacy/monitoring.coreos.com_alertmanagers.yaml similarity index 100% rename from charts/vkpr/crds/monitoring.coreos.com_alertmanagers.yaml rename to charts/vkpr/crds-legacy/monitoring.coreos.com_alertmanagers.yaml diff --git a/charts/vkpr/crds/monitoring.coreos.com_podmonitors.yaml b/charts/vkpr/crds-legacy/monitoring.coreos.com_podmonitors.yaml similarity index 100% rename from charts/vkpr/crds/monitoring.coreos.com_podmonitors.yaml rename to charts/vkpr/crds-legacy/monitoring.coreos.com_podmonitors.yaml diff --git a/charts/vkpr/crds/monitoring.coreos.com_prometheuses.yaml b/charts/vkpr/crds-legacy/monitoring.coreos.com_prometheuses.yaml similarity index 100% rename from charts/vkpr/crds/monitoring.coreos.com_prometheuses.yaml rename to charts/vkpr/crds-legacy/monitoring.coreos.com_prometheuses.yaml diff --git a/charts/vkpr/crds/monitoring.coreos.com_prometheusrules.yaml b/charts/vkpr/crds-legacy/monitoring.coreos.com_prometheusrules.yaml similarity index 100% rename from charts/vkpr/crds/monitoring.coreos.com_prometheusrules.yaml rename to charts/vkpr/crds-legacy/monitoring.coreos.com_prometheusrules.yaml diff --git a/charts/vkpr/crds/monitoring.coreos.com_servicemonitors.yaml b/charts/vkpr/crds-legacy/monitoring.coreos.com_servicemonitors.yaml similarity index 100% rename from charts/vkpr/crds/monitoring.coreos.com_servicemonitors.yaml rename to charts/vkpr/crds-legacy/monitoring.coreos.com_servicemonitors.yaml diff --git a/charts/vkpr/crds/monitoring.coreos.com_thanosrulers.yaml b/charts/vkpr/crds-legacy/monitoring.coreos.com_thanosrulers.yaml similarity index 100% rename from charts/vkpr/crds/monitoring.coreos.com_thanosrulers.yaml rename to charts/vkpr/crds-legacy/monitoring.coreos.com_thanosrulers.yaml diff --git a/charts/vkpr/values.yaml b/charts/vkpr/values.yaml index b3ce7ffb..0587fbf0 100644 --- a/charts/vkpr/values.yaml +++ b/charts/vkpr/values.yaml @@ -14,7 +14,7 @@ external-dns: cert-manager: enabled: false - installCRDs: true + installCRDs: false graylog: enabled: false diff --git a/examples/values-local.yaml b/examples/values-local.yaml index 95eb91e0..f0b1888b 100644 --- a/examples/values-local.yaml +++ b/examples/values-local.yaml @@ -1,8 +1,7 @@ # -# Valores para testes com o k3d. -# Note que o stack ingress foi desabilitado, pois o k3d já embute um Traefik. +# Valores para testes locais com o k3d. # -# helm upgrade -i -f values-k3d.yaml vkpr ./vkpr +# helm upgrade -i vkpr -f examples/values-local.yaml ./charts/vkpr # # Coloque as seguintes entradas no /etc/hosts : # 127.0.0.1 whoami.localdomain grafana.localdomain graylog.localdomain @@ -10,11 +9,13 @@ # Alguns charts são antigos e falham no k8s >= 1.16. Corrija com: # helm plugin install https://github.com/ContainerSolutions/helm-convert # +# IMPORTANTE: em testes locais, onde o cluster é descartável, recomendamos deixar +# o Helm criar os CRDs quando o chart permitir. 
# # INGRESS STACK # -nginx-ingress: +ingress-nginx: enabled: true controller: service: @@ -26,6 +27,7 @@ external-dns: enabled: false cert-manager: enabled: false + installCRDs: true # chart values ingress: @@ -86,11 +88,35 @@ loki-stack: # # MONITORING STACK # -prometheus-operator: +kube-prometheus-stack: enabled: true + alertmanager: + enabled: false prometheusOperator: - createCustomResource: false + enabled: true + manageCrds: true # ok para ambiente local + kubeApiServer: + enabled: true + kubelet: + enabled: true + kubeControllerManager: + enabled: true + coreDns: + enabled: true + kubeDns: + enabled: false + kubeEtcd: + enabled: true + kubeScheduler: + enabled: true + kubeProxy: + enabled: true + kubeStateMetrics: + enabled: true + nodeExporter: + enabled: true grafana: + enabled: false image: repository: vertigo/grafana tag: 7.0.0 @@ -115,11 +141,39 @@ prometheus-operator: api_url: http://keycloak.localdomain:32080/auth/realms/vkpr/protocol/openid-connect/userinfo allowed_domains: grafana.localdomain keycloak.localdomain allow_sign_up: true + # deploy prometheus instance + prometheus: + enabled: true +# # SECURITY STACK # -keycloak: + +# +# Vault: +# É preciso inicializar e fazer unseal +# https://learn.hashicorp.com/tutorials/vault/kubernetes-raft-deployment-guide#initialize-and-unseal-vault +# Resumo: +# kubectl get pods -l app.kubernetes.io/name=vault +# kubectl exec -ti vkpr-vault-0 -- vault operator init +# kubectl exec -ti vkpr-vault-0 -- vault operator unseal KEY # repetir 3 vezes +# +vault: enabled: true + server: + ha: + enabled: true + raft: + enabled: true + replicas: 1 + ingress: + enabled: true + hosts: + - host: vault.localdomain + path: ["/"] + +keycloak: + enabled: false prometheus: operator: enabled: true From dbfee0c627562bc750a5d9936c39407a66349bfd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Fernandes?= Date: Wed, 21 Oct 2020 10:34:38 -0300 Subject: [PATCH 2/3] v 0.8.0 --- CHANGELOG.md | 9 +- DEVELOP.md | 59 +- README.md | 4 +- charts/vkpr/Chart.yaml | 10 +- charts/vkpr/templates/acme.yaml | 39 - charts/vkpr/values.yaml | 33 +- charts/vkpr/vault-helm/.circleci/config.yml | 35 - charts/vkpr/vault-helm/.gitignore | 12 - charts/vkpr/vault-helm/.helmignore | 4 - charts/vkpr/vault-helm/CHANGELOG.md | 165 --- charts/vkpr/vault-helm/CONTRIBUTING.md | 209 ---- charts/vkpr/vault-helm/Chart.yaml | 10 - charts/vkpr/vault-helm/LICENSE.md | 353 ------- charts/vkpr/vault-helm/Makefile | 17 - charts/vkpr/vault-helm/README.md | 38 - charts/vkpr/vault-helm/templates/NOTES.txt | 14 - charts/vkpr/vault-helm/templates/_helpers.tpl | 389 ------- .../templates/injector-clusterrole.yaml | 18 - .../injector-clusterrolebinding.yaml | 19 - .../templates/injector-deployment.yaml | 107 -- .../templates/injector-mutating-webhook.yaml | 27 - .../templates/injector-service.yaml | 19 - .../templates/injector-serviceaccount.yaml | 11 - .../templates/server-clusterrolebinding.yaml | 23 - .../templates/server-config-configmap.yaml | 38 - .../templates/server-discovery-role.yaml | 19 - .../server-discovery-rolebinding.yaml | 23 - .../templates/server-disruptionbudget.yaml | 24 - .../templates/server-ha-active-service.yaml | 33 - .../templates/server-ha-standby-service.yaml | 33 - .../templates/server-headless-service.yaml | 33 - .../vault-helm/templates/server-ingress.yaml | 44 - .../vault-helm/templates/server-service.yaml | 46 - .../templates/server-serviceaccount.yaml | 16 - .../templates/server-statefulset.yaml | 167 --- .../vkpr/vault-helm/templates/ui-service.yaml | 47 - 
.../vault-helm/test/acceptance/_helpers.bash | 159 --- .../acceptance/injector-test/bootstrap.sh | 46 - .../test/acceptance/injector-test/job.yaml | 39 - .../injector-test/pg-deployment.yaml | 69 -- .../injector-test/pgdump-policy.hcl | 3 - .../vault-helm/test/acceptance/injector.bats | 58 -- .../test/acceptance/server-annotations.bats | 46 - .../test/acceptance/server-dev.bats | 64 -- .../acceptance/server-ha-enterprise-dr.bats | 167 --- .../acceptance/server-ha-enterprise-perf.bats | 165 --- .../test/acceptance/server-ha-raft.bats | 119 --- .../vault-helm/test/acceptance/server-ha.bats | 108 -- .../server-test/annotations-overrides.yaml | 9 - .../vault-helm/test/acceptance/server.bats | 116 --- .../vault-helm/test/docker/Test.dockerfile | 45 - .../vkpr/vault-helm/test/terraform/.gitignore | 1 - charts/vkpr/vault-helm/test/terraform/main.tf | 89 -- .../vkpr/vault-helm/test/terraform/outputs.tf | 7 - .../vault-helm/test/terraform/variables.tf | 28 - .../vkpr/vault-helm/test/unit/_helpers.bash | 4 - .../test/unit/injector-clusterrole.bats | 22 - .../unit/injector-clusterrolebinding.bats | 22 - .../test/unit/injector-deployment.bats | 449 -------- .../test/unit/injector-mutating-webhook.bats | 77 -- .../test/unit/injector-service.bats | 37 - .../test/unit/injector-serviceaccount.bats | 22 - .../test/unit/server-clusterrolebinding.bats | 72 -- .../test/unit/server-configmap.bats | 124 --- .../test/unit/server-dev-statefulset.bats | 403 ------- .../test/unit/server-ha-active-service.bats | 14 - .../test/unit/server-ha-disruptionbudget.bats | 87 -- .../test/unit/server-ha-standby-service.bats | 25 - .../test/unit/server-ha-statefulset.bats | 645 ------------ .../vault-helm/test/unit/server-ingress.bats | 95 -- .../vault-helm/test/unit/server-service.bats | 412 -------- .../test/unit/server-serviceaccount.bats | 91 -- .../test/unit/server-statefulset.bats | 982 ------------------ .../vkpr/vault-helm/test/unit/ui-service.bats | 250 ----- charts/vkpr/vault-helm/values.yaml | 441 -------- examples/{ => local}/values-local.yaml | 0 76 files changed, 85 insertions(+), 7674 deletions(-) delete mode 100644 charts/vkpr/templates/acme.yaml delete mode 100644 charts/vkpr/vault-helm/.circleci/config.yml delete mode 100644 charts/vkpr/vault-helm/.gitignore delete mode 100644 charts/vkpr/vault-helm/.helmignore delete mode 100644 charts/vkpr/vault-helm/CHANGELOG.md delete mode 100644 charts/vkpr/vault-helm/CONTRIBUTING.md delete mode 100644 charts/vkpr/vault-helm/Chart.yaml delete mode 100644 charts/vkpr/vault-helm/LICENSE.md delete mode 100644 charts/vkpr/vault-helm/Makefile delete mode 100644 charts/vkpr/vault-helm/README.md delete mode 100644 charts/vkpr/vault-helm/templates/NOTES.txt delete mode 100644 charts/vkpr/vault-helm/templates/_helpers.tpl delete mode 100644 charts/vkpr/vault-helm/templates/injector-clusterrole.yaml delete mode 100644 charts/vkpr/vault-helm/templates/injector-clusterrolebinding.yaml delete mode 100644 charts/vkpr/vault-helm/templates/injector-deployment.yaml delete mode 100644 charts/vkpr/vault-helm/templates/injector-mutating-webhook.yaml delete mode 100644 charts/vkpr/vault-helm/templates/injector-service.yaml delete mode 100644 charts/vkpr/vault-helm/templates/injector-serviceaccount.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-clusterrolebinding.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-config-configmap.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-discovery-role.yaml delete mode 100644 
charts/vkpr/vault-helm/templates/server-discovery-rolebinding.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-disruptionbudget.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-ha-active-service.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-ha-standby-service.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-headless-service.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-ingress.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-service.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-serviceaccount.yaml delete mode 100644 charts/vkpr/vault-helm/templates/server-statefulset.yaml delete mode 100644 charts/vkpr/vault-helm/templates/ui-service.yaml delete mode 100644 charts/vkpr/vault-helm/test/acceptance/_helpers.bash delete mode 100755 charts/vkpr/vault-helm/test/acceptance/injector-test/bootstrap.sh delete mode 100644 charts/vkpr/vault-helm/test/acceptance/injector-test/job.yaml delete mode 100644 charts/vkpr/vault-helm/test/acceptance/injector-test/pg-deployment.yaml delete mode 100644 charts/vkpr/vault-helm/test/acceptance/injector-test/pgdump-policy.hcl delete mode 100644 charts/vkpr/vault-helm/test/acceptance/injector.bats delete mode 100644 charts/vkpr/vault-helm/test/acceptance/server-annotations.bats delete mode 100644 charts/vkpr/vault-helm/test/acceptance/server-dev.bats delete mode 100644 charts/vkpr/vault-helm/test/acceptance/server-ha-enterprise-dr.bats delete mode 100644 charts/vkpr/vault-helm/test/acceptance/server-ha-enterprise-perf.bats delete mode 100644 charts/vkpr/vault-helm/test/acceptance/server-ha-raft.bats delete mode 100644 charts/vkpr/vault-helm/test/acceptance/server-ha.bats delete mode 100644 charts/vkpr/vault-helm/test/acceptance/server-test/annotations-overrides.yaml delete mode 100644 charts/vkpr/vault-helm/test/acceptance/server.bats delete mode 100644 charts/vkpr/vault-helm/test/docker/Test.dockerfile delete mode 100644 charts/vkpr/vault-helm/test/terraform/.gitignore delete mode 100644 charts/vkpr/vault-helm/test/terraform/main.tf delete mode 100644 charts/vkpr/vault-helm/test/terraform/outputs.tf delete mode 100644 charts/vkpr/vault-helm/test/terraform/variables.tf delete mode 100644 charts/vkpr/vault-helm/test/unit/_helpers.bash delete mode 100755 charts/vkpr/vault-helm/test/unit/injector-clusterrole.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/injector-clusterrolebinding.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/injector-deployment.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/injector-mutating-webhook.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/injector-service.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/injector-serviceaccount.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/server-clusterrolebinding.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/server-configmap.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/server-dev-statefulset.bats delete mode 100644 charts/vkpr/vault-helm/test/unit/server-ha-active-service.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/server-ha-disruptionbudget.bats delete mode 100644 charts/vkpr/vault-helm/test/unit/server-ha-standby-service.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/server-ha-statefulset.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/server-ingress.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/server-service.bats delete mode 100755 
charts/vkpr/vault-helm/test/unit/server-serviceaccount.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/server-statefulset.bats delete mode 100755 charts/vkpr/vault-helm/test/unit/ui-service.bats delete mode 100644 charts/vkpr/vault-helm/values.yaml rename examples/{ => local}/values-local.yaml (100%) diff --git a/CHANGELOG.md b/CHANGELOG.md index 986c3dee..e3b13bfa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,14 +5,19 @@ Upgrades: - cert-manager: 1.0.3 -- external-dns: 3.4.6 +- external-dns: 3.4.9 - nginx-ingress: REMOVED - ingress-nginx: 3.7.1 - prometheus-operator: REMOVED - kube-prometheus-stack: 10.1.0 - loki-stack: 0.41.2 - vault: 0.7.0 +- velero: 2.13.3 + +Improvements: + +- Several samples for local testing ## Version 0.7.2 and before -Crazy times, no record. Sorry fot that. +Crazy times, no record. Sorry for that. diff --git a/DEVELOP.md b/DEVELOP.md index b6ed789e..888cc275 100644 --- a/DEVELOP.md +++ b/DEVELOP.md @@ -1,7 +1,16 @@ -# Development +# Development Este documento explica como montar um ambiente para desenvolvimento do chart do vtg-ipaas. +- [Pré-requisitos](#pré-requisitos) + - [Arquivo /etc/hosts](#arquivo-etchosts) + - [Ferramentas](#ferramentas) + - [Cluster k3d local](#cluster-k3d-local) +- [Local VKPR deployment](#local-vkpr-deployment) + - [Get chart dependencies](#get-chart-dependencies) + - [Helm upgrade/install](#helm-upgradeinstall) + - [Testing local app](#testing-local-app) + ## Pré-requisitos ### Arquivo /etc/hosts @@ -16,25 +25,36 @@ Insira a linha abaixo no arquivo /etc/hosts da estação de desenvolvimento: Instale localmente as seguintes ferramentas: -- k3d -- helm +- k3d (versão 3.x ou superior) +- helm (v3 ou superior) - kubectl ### Cluster k3d local -Crie um cluster k3d local para uso durante o desenvolvimento: +Crie um cluster k3d local para uso durante o desenvolvimento. Isto pode ser feito de duas formas: + +* Usando o LB interno do k3d (forma preferida) - esta forma cria tanto um binding em `localhost:8080` quanto um IP na rede bridge para o ingress controller do VKPR: ```sh -k3d create -n vkpr-local \ - --publish 8080:32080 \ - --server-arg "--no-deploy=traefik" +k3d cluster create vkpr-local \ + -p "8080:80@loadbalancer" \ + -p "8443:443@loadbalancer" \ + --k3s-server-arg "--no-deploy=traefik" ``` -Os parâmetros acima desligam o Trefik (default do k3d), pois o Kong será o Ingress Controller. -Após a criação do cluster ajuste o KUBECONFIG: +* Usando NodePort - esta forma cria um binding em `localhost:8080` para o serviço que estiver no NodePort 32080 (este **não é** o defult do ingress controller do VKPR): ```sh -export KUBECONFIG="$(k3d get-kubeconfig --name='vkpr-local')" +k3d cluster create vkpr-local \ + -p "8080:32080@agent[0]" --agents 1 \ + --k3s-server-arg "--no-deploy=traefik" \ + --k3s-server-arg "--no-deploy=servicelb" +``` + +Ambos os casos acima desligam o Traefik (ingress default do k3d), pois usaremos o Nginx Ingress Controller que é parte do VKPR. Após a criação do cluster ajuste o KUBECONFIG: + +```sh +export KUBECONFIG=$(k3d kubeconfig write vkpr-local) kubectl cluster-info ``` @@ -43,14 +63,25 @@ kubectl cluster-info ### Get chart dependencies ```sh -cd charts/vkpr -helm dependency update -cd ../.. 
+helm dependency update ./charts/vkpr ``` ### Helm upgrade/install ```sh -helm upgrade -i vkpr -f ./examples/values-local.yaml ./charts/vkpr +helm upgrade -i vkpr --skip-crds -f ./examples/local/values-local-minimal.yaml ./charts/vkpr ``` +Check the LoadBalancer external IP (might take a few seconds): + +```sh +kubectl get svc +``` + +### Testing local app + +```sh +# both tests are the same +curl whoami.localdomain:8080 +curl -H "Host: whoami.localdomain" +``` diff --git a/README.md b/README.md index 5ac68637..e0fe4f0b 100644 --- a/README.md +++ b/README.md @@ -83,13 +83,13 @@ The **Backup Stack** is dedicated to backup and restore tools in order to migrat | Charts | VKPR 0.8.0 | |------------------------------------------------------------------------------------------------|------------| | [cert-manager](https://charts.vertigo.com.br/docs/stacks#cert-manager) | `1.0.3` | -| [ExternalDNS](https://charts.vertigo.com.br/docs/stacks#externaldns) | `3.4.6` | +| [ExternalDNS](https://charts.vertigo.com.br/docs/stacks#externaldns) | `3.4.9` | | [Loki](https://charts.vertigo.com.br/docs/stacks#loki) | `0.41.2` | | [Keycloak](https://charts.vertigo.com.br/docs/stacks#keycloak) | `8.2.2` | | [NGINX Ingress Controller](https://charts.vertigo.com.br/docs/stacks#nginx-ingress-controller) | `3.7.1` | | [Kube Prometheus Stack](https://charts.vertigo.com.br/docs/stacks#prometheus-operator) | `10.1.0` | | [Vault](https://charts.vertigo.com.br/docs/stacks#vault) | `0.7.0` | -| [Velero](https://charts.vertigo.com.br/docs/stacks#velero) | `2.7.4` | +| [Velero](https://charts.vertigo.com.br/docs/stacks#velero) | `2.13.3` | ## Requisites diff --git a/charts/vkpr/Chart.yaml b/charts/vkpr/Chart.yaml index 99c9831f..1bcfb6d5 100644 --- a/charts/vkpr/Chart.yaml +++ b/charts/vkpr/Chart.yaml @@ -17,7 +17,7 @@ dependencies: ### INGRESS STACK ### - name: external-dns repository: https://charts.bitnami.com/bitnami - version: 3.4.6 + version: 3.4.9 condition: external-dns.enabled - name: ingress-nginx @@ -105,7 +105,11 @@ dependencies: condition: vault.enabled ### BACKUP STACK ### + # - name: velero + # repository: https://kubernetes-charts.storage.googleapis.com/ + # version: 2.7.4 + # condition: velero.enabled - name: velero - repository: https://kubernetes-charts.storage.googleapis.com/ - version: 2.7.4 + repository: https://vmware-tanzu.github.io/helm-charts/ + version: 2.13.3 condition: velero.enabled diff --git a/charts/vkpr/templates/acme.yaml b/charts/vkpr/templates/acme.yaml deleted file mode 100644 index 9dbfb663..00000000 --- a/charts/vkpr/templates/acme.yaml +++ /dev/null @@ -1,39 +0,0 @@ -{{- if index .Values "cert-manager" "enabled" -}} -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-staging -spec: - acme: - # You must replace this email address with your own. - # Let's Encrypt will use this to contact you about expiring - # certificates, and issues related to your account. - email: {{ .Values.acme.email }} - server: https://acme-staging-v02.api.letsencrypt.org/directory - privateKeySecretRef: - # Secret resource used to store the account's private key. - name: acme-staging-issuer-account-key - {{- with .Values.acme.solvers }} - solvers: - {{- toYaml . | nindent 4 }} - {{- end }} ---- -apiVersion: cert-manager.io/v1 -kind: ClusterIssuer -metadata: - name: letsencrypt-production -spec: - acme: - # You must replace this email address with your own. - # Let's Encrypt will use this to contact you about expiring - # certificates, and issues related to your account. 
- email: {{ .Values.acme.email }} - server: https://acme-v02.api.letsencrypt.org/directory - privateKeySecretRef: - # Secret resource used to store the account's private key. - name: acme-production-issuer-account-key - {{- with .Values.acme.solvers }} - solvers: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end -}} diff --git a/charts/vkpr/values.yaml b/charts/vkpr/values.yaml index 0587fbf0..2eb2c67c 100644 --- a/charts/vkpr/values.yaml +++ b/charts/vkpr/values.yaml @@ -3,10 +3,20 @@ # Declare variables to be passed into your templates. # subchart defaults -nginx-ingress: +ingress-nginx: enabled: true controller: electionID: vkpr-ingress-controller-leader + publishService: + enabled: true + autoscaling: + enabled: false + +# old chart (former stable repo) +# nginx-ingress: +# enabled: true +# controller: +# electionID: vkpr-ingress-controller-leader external-dns: enabled: false @@ -29,13 +39,18 @@ vault: enabled: false loki-stack: - enabled: true + enabled: false -prometheus-operator: - enabled: true +kube-prometheus-stack: + enabled: false prometheusOperator: createCustomResource: false +# prometheus-operator: +# enabled: true +# prometheusOperator: +# createCustomResource: false + # dados para o CRD do ACME acme: email: someone@example.com @@ -117,10 +132,10 @@ affinity: {} # BACKUP STACK velero: enabled: false - image: - repository: velero/velero - tag: v1.2.0 - pullPolicy: IfNotPresent + # image: + # repository: velero/velero + # tag: v1.2.0 + # pullPolicy: IfNotPresent # Annotations to add to the Velero deployment's pod template. Optional. # # If using kube2iam or kiam, use the following annotation with your AWS_ACCOUNT_ID @@ -167,7 +182,7 @@ velero: enabled: true additionalLabels: {} # Install CRDs as a templates. Enabled by default. - installCRDs: true + installCRDs: false ## ## End of deployment-related settings. 
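With `installCRDs` switched off for cert-manager and for the Velero templates, it is worth confirming which subcharts the umbrella chart actually renders under the new 0.8.0 defaults. A minimal sketch, assuming Helm 3 and the renamed `examples/local/values-local.yaml`; the `grep`/`cut` post-processing of the `# Source:` comments is illustrative, not part of the chart:

```sh
# Fetch the upgraded subcharts declared in charts/vkpr/Chart.yaml.
helm dependency update ./charts/vkpr

# Render the chart offline and list the subcharts that produce manifests
# with the local example values (CRDs skipped, as in DEVELOP.md).
helm template vkpr ./charts/vkpr --skip-crds \
  -f ./examples/local/values-local.yaml \
  | grep '^# Source: vkpr/charts/' | cut -d/ -f3 | sort -u
```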
diff --git a/charts/vkpr/vault-helm/.circleci/config.yml b/charts/vkpr/vault-helm/.circleci/config.yml deleted file mode 100644 index 9d497c07..00000000 --- a/charts/vkpr/vault-helm/.circleci/config.yml +++ /dev/null @@ -1,35 +0,0 @@ -version: 2 -jobs: - bats-unit-test: - machine: true - steps: - - checkout - - run: make test-image - - run: make test-unit - update-helm-charts-index: - docker: - - image: circleci/golang:latest - steps: - - run: - name: update helm-charts index - command: | - curl --show-error --silent --fail --user "${CIRCLE_TOKEN}:" \ - -X POST \ - -H 'Content-Type: application/json' \ - -H 'Accept: application/json' \ - -d "{\"branch\": \"master\",\"parameters\":{\"SOURCE_REPO\": \"${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}\",\"SOURCE_TAG\": \"${CIRCLE_TAG}\"}}" \ - "${CIRCLE_ENDPOINT}/${CIRCLE_PROJECT}/pipeline" -workflows: - version: 2 - build_and_test: - jobs: - - bats-unit-test - update-helm-charts-index: - jobs: - - update-helm-charts-index: - context: helm-charts-trigger - filters: - tags: - only: /^v.*/ - branches: - ignore: /.*/ diff --git a/charts/vkpr/vault-helm/.gitignore b/charts/vkpr/vault-helm/.gitignore deleted file mode 100644 index 6992d23f..00000000 --- a/charts/vkpr/vault-helm/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -.DS_Store -.terraform/ -.terraform.tfstate* -terraform.tfstate* -terraform.tfvars -values.dev.yaml -vaul-helm-dev-creds.json -./test/acceptance/vaul-helm-dev-creds.json -./test/terraform/vaul-helm-dev-creds.json -./test/unit/vaul-helm-dev-creds.json -./test/acceptance/values.yaml -./test/acceptance/values.yml diff --git a/charts/vkpr/vault-helm/.helmignore b/charts/vkpr/vault-helm/.helmignore deleted file mode 100644 index d1180d2f..00000000 --- a/charts/vkpr/vault-helm/.helmignore +++ /dev/null @@ -1,4 +0,0 @@ -.git/ -.terraform/ -bin/ -test/ diff --git a/charts/vkpr/vault-helm/CHANGELOG.md b/charts/vkpr/vault-helm/CHANGELOG.md deleted file mode 100644 index 6eea47b4..00000000 --- a/charts/vkpr/vault-helm/CHANGELOG.md +++ /dev/null @@ -1,165 +0,0 @@ -## Unreleased - -Features: - -Improvements: -* Server configs can now be defined in YAML. 
Multi-line string configs are still compatible [GH-213](https://github.com/hashicorp/vault-helm/pull/213) -* Removed IPC_LOCK privileges since swap is disabled on containers [[GH-198](https://github.com/hashicorp/vault-helm/pull/198)] -* Use port names that map to vault.scheme [[GH-223](https://github.com/hashicorp/vault-helm/pull/223)] -* Allow both yaml and multi-line string annotations [[GH-272](https://github.com/hashicorp/vault-helm/pull/272)] -* Added configurable to set the Raft node name to hostname [[GH-269](https://github.com/hashicorp/vault-helm/pull/269)] -* Support setting priorityClassName on pods [[GH-282](https://github.com/hashicorp/vault-helm/pull/282)] - -Bugs: -* Fixed default ingress path [[GH-224](https://github.com/hashicorp/vault-helm/pull/224)] -* Fixed annotations for HA standby/active services [[GH-268](https://github.com/hashicorp/vault-helm/pull/268)] - -## 0.5.0 (April 9th, 2020) - -Features: - -* Added Raft support for HA mode [[GH-228](https://github.com/hashicorp/vault-helm/pull/229)] -* Now supports Vault Enterprise [[GH-250](https://github.com/hashicorp/vault-helm/pull/250)] -* Added K8s Service Registration for HA modes [[GH-250](https://github.com/hashicorp/vault-helm/pull/250)] - -* Option to set `AGENT_INJECT_VAULT_AUTH_PATH` for the injector [[GH-185](https://github.com/hashicorp/vault-helm/pull/185)] -* Added environment variables for logging and revocation on Vault Agent Injector [[GH-219](https://github.com/hashicorp/vault-helm/pull/219)] -* Option to set environment variables for the injector deployment [[GH-232](https://github.com/hashicorp/vault-helm/pull/232)] -* Added affinity, tolerations, and nodeSelector options for the injector deployment [[GH-234](https://github.com/hashicorp/vault-helm/pull/234)] -* Made all annotations multi-line strings [[GH-227](https://github.com/hashicorp/vault-helm/pull/227)] - -## 0.4.0 (February 21st, 2020) - -Improvements: - -* Allow process namespace sharing between Vault and sidecar containers [[GH-174](https://github.com/hashicorp/vault-helm/pull/174)] -* Added configurable to change updateStrategy [[GH-172](https://github.com/hashicorp/vault-helm/pull/172)] -* Added sleep in the preStop lifecycle step [[GH-188](https://github.com/hashicorp/vault-helm/pull/188)] -* Updated chart and tests to Helm 3 [[GH-195](https://github.com/hashicorp/vault-helm/pull/195)] -* Adds Values.injector.externalVaultAddr to use the injector with an external vault [[GH-207](https://github.com/hashicorp/vault-helm/pull/207)] - -Bugs: - -* Fix bug where Vault lifecycle was appended after extra containers. 
[[GH-179](https://github.com/hashicorp/vault-helm/pull/179)] - -## 0.3.3 (January 14th, 2020) - -Security: - -* Added `server.extraArgs` to allow loading of additional Vault configurations containing sensitive settings [GH-175](https://github.com/hashicorp/vault-helm/issues/175) - -Bugs: - -* Fixed injection bug where wrong environment variables were being used for manually mounted TLS files - -## 0.3.2 (January 8th, 2020) - -Bugs: - -* Fixed injection bug where TLS Skip Verify was true by default [VK8S-35] - -## 0.3.1 (January 2nd, 2020) - -Bugs: - -* Fixed injection bug causing kube-system pods to be rejected [VK8S-14] - -## 0.3.0 (December 19th, 2019) - -Features: - -* Extra containers can now be added to the Vault pods -* Added configurability of pod probes -* Added Vault Agent Injector - -Improvements: - -* Moved `global.image` to `server.image` -* Changed UI service template to route pods that aren't ready via `publishNotReadyAddresses: true` -* Added better HTTP/HTTPS scheme support to http probes -* Added configurable node port for Vault service -* `server.authDelegator` is now enabled by default - -Bugs: - -* Fixed upgrade bug by removing chart label which contained the version -* Fixed typo on `serviceAccount` (was `serviceaccount`) -* Fixed readiness/liveliness HTTP probe default to accept standbys - -## 0.2.1 (November 12th, 2019) - -Bugs: - -* Removed `readOnlyRootFilesystem` causing issues when validating deployments - -## 0.2.0 (October 29th, 2019) - -Features: - -* Added load balancer support -* Added ingress support -* Added configurable for service types (ClusterIP, NodePort, LoadBalancer, etc) -* Removed root requirements, now runs as Vault user - -Improvements: - -* Added namespace value to all rendered objects -* Made ports configurable in services -* Added the ability to add custom annotations to services -* Added docker image for running bats test in CircleCI -* Removed restrictions around `dev` mode such as annotations -* `readOnlyRootFilesystem` is now configurable -* Image Pull Policy is now configurable - -Bugs: - -* Fixed selector bugs related to Helm label updates (services, affinities, and pod disruption) -* Fixed bug where audit storage was not being mounted in HA mode -* Fixed bug where Vault pod wasn't receiving SIGTERM signals - - -## 0.1.2 (August 22nd, 2019) - -Features: - -* Added `extraSecretEnvironmentVars` to allow users to mount secrets as - environment variables -* Added `tlsDisable` configurable to change HTTP protocols from HTTP/HTTPS - depending on the value -* Added `serviceNodePort` to configure a NodePort value when setting `serviceType` - to "NodePort" - -Improvements: - -* Changed UI port to 8200 for better HTTP protocol support -* Added `path` to `extraVolumes` to define where the volume should be - mounted. Defaults to `/vault/userconfig` -* Upgraded Vault to 1.2.2 - -Bugs: - -* Fixed bug where upgrade would fail because immutable labels were being - changed (Helm Version label) -* Fixed bug where UI service used wrong selector after updating helm labels -* Added `VAULT_API_ADDR` env to Vault pod to fixed bug where Vault thinks - Consul is the active node -* Removed `step-down` preStop since it requires authentication. 
Shutdown signal - sent by Kube acts similar to `step-down` - - -## 0.1.1 (August 7th, 2019) - -Features: - -* Added `authDelegator` Cluster Role Binding to Vault service account for - bootstrapping Kube auth method - -Improvements: - -* Added `server.service.clusterIP` to `values.yml` so users can toggle - the Vault service to headless by using the value `None`. -* Upgraded Vault to 1.2.1 - -## 0.1.0 (August 6th, 2019) - -Initial release diff --git a/charts/vkpr/vault-helm/CONTRIBUTING.md b/charts/vkpr/vault-helm/CONTRIBUTING.md deleted file mode 100644 index 431dfa89..00000000 --- a/charts/vkpr/vault-helm/CONTRIBUTING.md +++ /dev/null @@ -1,209 +0,0 @@ -# Contributing to Vault Helm - -**Please note:** We take Vault's security and our users' trust very seriously. -If you believe you have found a security issue in Vault, please responsibly -disclose by contacting us at security@hashicorp.com. - -**First:** if you're unsure or afraid of _anything_, just ask or submit the -issue or pull request anyways. You won't be yelled at for giving it your best -effort. The worst that can happen is that you'll be politely asked to change -something. We appreciate any sort of contributions, and don't want a wall of -rules to get in the way of that. - -That said, if you want to ensure that a pull request is likely to be merged, -talk to us! You can find out our thoughts and ensure that your contribution -won't clash or be obviated by Vault's normal direction. A great way to do this -is via the [Vault Google Group][2]. Sometimes Vault devs are in `#vault-tool` -on Freenode, too. - -This document will cover what we're looking for in terms of reporting issues. -By addressing all the points we're looking for, it raises the chances we can -quickly merge or address your contributions. - -## Issues - -### Reporting an Issue - -* Make sure you test against the latest released version. It is possible - we already fixed the bug you're experiencing. Even better is if you can test - against `master`, as bugs are fixed regularly but new versions are only - released every few months. - -* Provide steps to reproduce the issue, and if possible include the expected - results as well as the actual results. Please provide text, not screen shots! - -* Respond as promptly as possible to any questions made by the Vault - team to your issue. Stale issues will be closed periodically. - -### Issue Lifecycle - -1. The issue is reported. - -2. The issue is verified and categorized by a Vault Helm collaborator. - Categorization is done via tags. For example, bugs are marked as "bugs". - -3. Unless it is critical, the issue may be left for a period of time (sometimes - many weeks), giving outside contributors -- maybe you!? -- a chance to - address the issue. - -4. The issue is addressed in a pull request or commit. The issue will be - referenced in the commit message so that the code that fixes it is clearly - linked. - -5. The issue is closed. Sometimes, valid issues will be closed to keep - the issue tracker clean. The issue is still indexed and available for - future viewers, or can be re-opened if necessary. - -## Testing - -The Helm chart ships with both unit and acceptance tests. - -The unit tests don't require any active Kubernetes cluster and complete -very quickly. These should be used for fast feedback during development. -The acceptance tests require a Kubernetes cluster with a configured `kubectl`. 
- -### Prequisites -* [Bats](https://github.com/bats-core/bats-core) - ```bash - brew install bats-core - ``` -* [yq](https://pypi.org/project/yq/) - ```bash - brew install python-yq - ``` -* [helm](https://helm.sh) - ```bash - brew install kubernetes-helm - ``` - -### Running The Tests - -To run the unit tests: - - bats ./test/unit - -To run the acceptance tests: - - bats ./test/acceptance - -If the acceptance tests fail, deployed resources in the Kubernetes cluster -may not be properly cleaned up. We recommend recycling the Kubernetes cluster to -start from a clean slate. - -**Note:** There is a Terraform configuration in the -[`test/terraform/`](https://github.com/hashicorp/vault-helm/tree/master/test/terraform) directory -that can be used to quickly bring up a GKE cluster and configure -`kubectl` and `helm` locally. This can be used to quickly spin up a test -cluster for acceptance tests. Unit tests _do not_ require a running Kubernetes -cluster. - -### Writing Unit Tests - -Changes to the Helm chart should be accompanied by appropriate unit tests. - -#### Formatting - -- Put tests in the test file in the same order as the variables appear in the `values.yaml`. -- Start tests for a chart value with a header that says what is being tested, like this: - ``` - #-------------------------------------------------------------------- - # annotations - ``` - -- Name the test based on what it's testing in the following format (this will be its first line): - ``` - @test "
: " { - ``` - - When adding tests to an existing file, the first section will be the same as the other tests in the file. - -#### Test Details - -[Bats](https://github.com/bats-core/bats-core) provides a way to run commands in a shell and inspect the output in an automated way. -In all of the tests in this repo, the base command being run is [helm template](https://docs.helm.sh/helm/#helm-template) which turns the templated files into straight yaml output. -In this way, we're able to test that the various conditionals in the templates render as we would expect. - -Each test defines the files that should be rendered using the `--show-only` flag, then it might adjust chart values by adding `--set` flags as well. -The output from this `helm template` command is then piped to [yq](https://pypi.org/project/yq/). -`yq` allows us to pull out just the information we're interested in, either by referencing its position in the yaml file directly or giving information about it (like its length). -The `-r` flag can be used with `yq` to return a raw string instead of a quoted one which is especially useful when looking for an exact match. - -The test passes or fails based on the conditional at the end that is in square brackets, which is a comparison of our expected value and the output of `helm template` piped to `yq`. - -The `| tee /dev/stderr ` pieces direct any terminal output of the `helm template` and `yq` commands to stderr so that it doesn't interfere with `bats`. - -#### Test Examples - -Here are some examples of common test patterns: - -- Check that a value is disabled by default - - ``` - @test "ui/Service: no type by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "null" ] - } - ``` - - In this example, nothing is changed from the default templates (no `--set` flags), then we use `yq` to retrieve the value we're checking, `.spec.type`. - This output is then compared against our expected value (`null` in this case) in the assertion `[ "${actual}" = "null" ]`. - - -- Check that a template value is rendered to a specific value - ``` - @test "ui/Service: specified type" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'ui.serviceType=LoadBalancer' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "LoadBalancer" ] - } - ``` - - This is very similar to the last example, except we've changed a default value with the `--set` flag and correspondingly changed the expected value. - -- Check that a template value contains several values - ``` - @test "server/standalone-StatefulSet: custom resources" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.resources.requests.memory=256Mi' \ - --set 'server.resources.requests.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.requests.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.resources.limits.memory=256Mi' \ - --set 'server.resources.limits.cpu=250m' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.limits.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - ``` - - *Note:* If testing more than two conditions, it would be good to separate the `helm template` part of the command from the `yq` sections to reduce redundant work. - -- Check that an entire template file is not rendered - ``` - @test "syncCatalog/Deployment: disabled by default" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - } - ``` - Here we are check the length of the command output to see if the anything is rendered. - This style can easily be switched to check that a file is rendered instead. diff --git a/charts/vkpr/vault-helm/Chart.yaml b/charts/vkpr/vault-helm/Chart.yaml deleted file mode 100644 index 34693592..00000000 --- a/charts/vkpr/vault-helm/Chart.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v2 -name: vault -version: 0.5.0 -description: Install and configure Vault on Kubernetes. -home: https://www.vaultproject.io -icon: https://github.com/hashicorp/vault/raw/f22d202cde2018f9455dec755118a9b84586e082/Vault_PrimaryLogo_Black.png -sources: - - https://github.com/hashicorp/vault - - https://github.com/hashicorp/vault-helm - - https://github.com/hashicorp/vault-k8s diff --git a/charts/vkpr/vault-helm/LICENSE.md b/charts/vkpr/vault-helm/LICENSE.md deleted file mode 100644 index 82b4de97..00000000 --- a/charts/vkpr/vault-helm/LICENSE.md +++ /dev/null @@ -1,353 +0,0 @@ -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. “Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. 
“Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. 
Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. 
Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. 
Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. 
- -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. diff --git a/charts/vkpr/vault-helm/Makefile b/charts/vkpr/vault-helm/Makefile deleted file mode 100644 index 4698fb94..00000000 --- a/charts/vkpr/vault-helm/Makefile +++ /dev/null @@ -1,17 +0,0 @@ -TEST_IMAGE?=vault-helm-test - -test-image: - @docker build --rm -t '$(TEST_IMAGE)' -f $(CURDIR)/test/docker/Test.dockerfile $(CURDIR) - -test-unit: - @docker run -it -v ${PWD}:/helm-test vault-helm-test bats /helm-test/test/unit - -test-acceptance: - @docker run -it -v ${PWD}:/helm-test vault-helm-test bats /helm-test/test/acceptance - -test-bats: test-unit test-acceptance - -test: test-image test-bats - - -.PHONY: test-docker diff --git a/charts/vkpr/vault-helm/README.md b/charts/vkpr/vault-helm/README.md deleted file mode 100644 index b0498250..00000000 --- a/charts/vkpr/vault-helm/README.md +++ /dev/null @@ -1,38 +0,0 @@ -# Vault Helm Chart - -This repository contains the official HashiCorp Helm chart for installing -and configuring Vault on Kubernetes. This chart supports multiple use -cases of Vault on Kubernetes depending on the values provided. - -For full documentation on this Helm chart along with all the ways you can -use Vault with Kubernetes, please see the -[Vault and Kubernetes documentation](https://www.vaultproject.io/docs/platform/k8s/). - -## Prerequisites - -To use the charts here, [Helm](https://helm.sh/) must be installed in your -Kubernetes cluster. Setting up Kubernetes and Helm and is outside the scope -of this README. Please refer to the Kubernetes and Helm documentation. - -The versions required are: - - * **Helm 3.0+** - This is the earliest version of Helm tested. It is possible - it works with earlier versions but this chart is untested for those versions. - * **Kubernetes 1.9+** - This is the earliest version of Kubernetes tested. - It is possible that this chart works with earlier versions but it is - untested. Other versions verified are Kubernetes 1.10, 1.11. - -## Usage - -For now, we do not host a chart repository. To use the charts, you must -download this repository and unpack it into a directory. Either -[download a tagged release](https://github.com/hashicorp/vault-helm/releases) or -use `git checkout` to a tagged release. -Assuming this repository was unpacked into the directory `vault-helm`, the chart can -then be installed directly: - - helm install ./vault-helm - -Please see the many options supported in the `values.yaml` -file. These are also fully documented directly on the -[Vault website](https://www.vaultproject.io/docs/platform/k8s/helm). diff --git a/charts/vkpr/vault-helm/templates/NOTES.txt b/charts/vkpr/vault-helm/templates/NOTES.txt deleted file mode 100644 index b784f572..00000000 --- a/charts/vkpr/vault-helm/templates/NOTES.txt +++ /dev/null @@ -1,14 +0,0 @@ - -Thank you for installing HashiCorp Vault! - -Now that you have deployed Vault, you should look over the docs on using -Vault with Kubernetes available here: - -https://www.vaultproject.io/docs/ - - -Your release is named {{ .Release.Name }}. 
To learn more about the release, try: - - $ helm status {{ .Release.Name }} - $ helm get {{ .Release.Name }} - diff --git a/charts/vkpr/vault-helm/templates/_helpers.tpl b/charts/vkpr/vault-helm/templates/_helpers.tpl deleted file mode 100644 index bab233b9..00000000 --- a/charts/vkpr/vault-helm/templates/_helpers.tpl +++ /dev/null @@ -1,389 +0,0 @@ -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to -this (by the DNS naming spec). If release name contains chart name it will -be used as a full name. -*/}} -{{- define "vault.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "vault.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Expand the name of the chart. -*/}} -{{- define "vault.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Compute the maximum number of unavailable replicas for the PodDisruptionBudget. -This defaults to (n/2)-1 where n is the number of members of the server cluster. -Add a special case for replicas=1, where it should default to 0 as well. -*/}} -{{- define "vault.pdb.maxUnavailable" -}} -{{- if eq (int .Values.server.ha.replicas) 1 -}} -{{ 0 }} -{{- else if .Values.server.ha.disruptionBudget.maxUnavailable -}} -{{ .Values.server.ha.disruptionBudget.maxUnavailable -}} -{{- else -}} -{{- div (sub (div (mul (int .Values.server.ha.replicas) 10) 2) 1) 10 -}} -{{- end -}} -{{- end -}} - -{{/* -Set the variable 'mode' to the server mode requested by the user to simplify -template logic. -*/}} -{{- define "vault.mode" -}} - {{- if .Values.injector.externalVaultAddr -}} - {{- $_ := set . "mode" "external" -}} - {{- else if eq (.Values.server.dev.enabled | toString) "true" -}} - {{- $_ := set . "mode" "dev" -}} - {{- else if eq (.Values.server.ha.enabled | toString) "true" -}} - {{- $_ := set . "mode" "ha" -}} - {{- else if or (eq (.Values.server.standalone.enabled | toString) "true") (eq (.Values.server.standalone.enabled | toString) "-") -}} - {{- $_ := set . "mode" "standalone" -}} - {{- else -}} - {{- $_ := set . "mode" "" -}} - {{- end -}} -{{- end -}} - -{{/* -Set's the replica count based on the different modes configured by user -*/}} -{{- define "vault.replicas" -}} - {{ if eq .mode "standalone" }} - {{- default 1 -}} - {{ else if eq .mode "ha" }} - {{- .Values.server.ha.replicas | default 3 -}} - {{ else }} - {{- default 1 -}} - {{ end }} -{{- end -}} - -{{/* -Set's up configmap mounts if this isn't a dev deployment and the user -defined a custom configuration. Additionally iterates over any -extra volumes the user may have specified (such as a secret with TLS). -*/}} -{{- define "vault.volumes" -}} - {{- if and (ne .mode "dev") (or (.Values.server.standalone.config) (.Values.server.ha.config)) }} - - name: config - configMap: - name: {{ template "vault.fullname" . 
}}-config - {{ end }} - {{- range .Values.server.extraVolumes }} - - name: userconfig-{{ .name }} - {{ .type }}: - {{- if (eq .type "configMap") }} - name: {{ .name }} - {{- else if (eq .type "secret") }} - secretName: {{ .name }} - {{- end }} - {{- end }} -{{- end -}} - -{{/* -Set's a command to override the entrypoint defined in the image -so we can make the user experience nicer. This works in with -"vault.args" to specify what commands /bin/sh should run. -*/}} -{{- define "vault.command" -}} - {{ if or (eq .mode "standalone") (eq .mode "ha") }} - - "/bin/sh" - - "-ec" - {{ end }} -{{- end -}} - -{{/* -Set's the args for custom command to render the Vault configuration -file with IP addresses to make the out of box experience easier -for users looking to use this chart with Consul Helm. -*/}} -{{- define "vault.args" -}} - {{ if or (eq .mode "standalone") (eq .mode "ha") }} - - | - sed -E "s/HOST_IP/${HOST_IP?}/g" /vault/config/extraconfig-from-values.hcl > /tmp/storageconfig.hcl; - sed -Ei "s/POD_IP/${POD_IP?}/g" /tmp/storageconfig.hcl; - /usr/local/bin/docker-entrypoint.sh vault server -config=/tmp/storageconfig.hcl {{ .Values.server.extraArgs }} - {{ end }} -{{- end -}} - -{{/* -Set's additional environment variables based on the mode. -*/}} -{{- define "vault.envs" -}} - {{ if eq .mode "dev" }} - - name: VAULT_DEV_ROOT_TOKEN_ID - value: "root" - {{ end }} -{{- end -}} - -{{/* -Set's which additional volumes should be mounted to the container -based on the mode configured. -*/}} -{{- define "vault.mounts" -}} - {{ if eq (.Values.server.auditStorage.enabled | toString) "true" }} - - name: audit - mountPath: /vault/audit - {{ end }} - {{ if or (eq .mode "standalone") (and (eq .mode "ha") (eq (.Values.server.ha.raft.enabled | toString) "true")) }} - {{ if eq (.Values.server.dataStorage.enabled | toString) "true" }} - - name: data - mountPath: /vault/data - {{ end }} - {{ end }} - {{ if and (ne .mode "dev") (or (.Values.server.standalone.config) (.Values.server.ha.config)) }} - - name: config - mountPath: /vault/config - {{ end }} - {{- range .Values.server.extraVolumes }} - - name: userconfig-{{ .name }} - readOnly: true - mountPath: {{ .path | default "/vault/userconfig" }}/{{ .name }} - {{- end }} -{{- end -}} - -{{/* -Set's up the volumeClaimTemplates when data or audit storage is required. HA -might not use data storage since Consul is likely it's backend, however, audit -storage might be desired by the user. 
-*/}} -{{- define "vault.volumeclaims" -}} - {{- if and (ne .mode "dev") (or .Values.server.dataStorage.enabled .Values.server.auditStorage.enabled) }} - volumeClaimTemplates: - {{- if and (eq (.Values.server.dataStorage.enabled | toString) "true") (or (eq .mode "standalone") (eq (.Values.server.ha.raft.enabled | toString ) "true" )) }} - - metadata: - name: data - spec: - accessModes: - - {{ .Values.server.dataStorage.accessMode | default "ReadWriteOnce" }} - resources: - requests: - storage: {{ .Values.server.dataStorage.size }} - {{- if .Values.server.dataStorage.storageClass }} - storageClassName: {{ .Values.server.dataStorage.storageClass }} - {{- end }} - {{ end }} - {{- if eq (.Values.server.auditStorage.enabled | toString) "true" }} - - metadata: - name: audit - spec: - accessModes: - - {{ .Values.server.auditStorage.accessMode | default "ReadWriteOnce" }} - resources: - requests: - storage: {{ .Values.server.auditStorage.size }} - {{- if .Values.server.auditStorage.storageClass }} - storageClassName: {{ .Values.server.auditStorage.storageClass }} - {{- end }} - {{ end }} - {{ end }} -{{- end -}} - -{{/* -Set's the affinity for pod placement when running in standalone and HA modes. -*/}} -{{- define "vault.affinity" -}} - {{- if and (ne .mode "dev") .Values.server.affinity }} - affinity: - {{ tpl .Values.server.affinity . | nindent 8 | trim }} - {{ end }} -{{- end -}} - -{{/* -Sets the injector affinity for pod placement -*/}} -{{- define "injector.affinity" -}} - {{- if .Values.injector.affinity }} - affinity: - {{ tpl .Values.injector.affinity . | nindent 8 | trim }} - {{ end }} -{{- end -}} - -{{/* -Set's the toleration for pod placement when running in standalone and HA modes. -*/}} -{{- define "vault.tolerations" -}} - {{- if and (ne .mode "dev") .Values.server.tolerations }} - tolerations: - {{ tpl .Values.server.tolerations . | nindent 8 | trim }} - {{- end }} -{{- end -}} - -{{/* -Sets the injector toleration for pod placement -*/}} -{{- define "injector.tolerations" -}} - {{- if .Values.injector.tolerations }} - tolerations: - {{ tpl .Values.injector.tolerations . | nindent 8 | trim }} - {{- end }} -{{- end -}} - -{{/* -Set's the node selector for pod placement when running in standalone and HA modes. -*/}} -{{- define "vault.nodeselector" -}} - {{- if and (ne .mode "dev") .Values.server.nodeSelector }} - nodeSelector: - {{ tpl .Values.server.nodeSelector . | indent 8 | trim }} - {{- end }} -{{- end -}} - -{{/* -Sets the injector node selector for pod placement -*/}} -{{- define "injector.nodeselector" -}} - {{- if .Values.injector.nodeSelector }} - nodeSelector: - {{ tpl .Values.injector.nodeSelector . | indent 8 | trim }} - {{- end }} -{{- end -}} - -{{/* -Sets extra pod annotations -*/}} -{{- define "vault.annotations" -}} - {{- if and (ne .mode "dev") .Values.server.annotations }} - annotations: - {{- $tp := typeOf .Values.server.annotations }} - {{- if eq $tp "string" }} - {{- tpl .Values.server.annotations . | nindent 8 }} - {{- else }} - {{- toYaml .Values.server.annotations | nindent 8 }} - {{- end }} - {{- end }} -{{- end -}} - -{{/* -Sets extra ui service annotations -*/}} -{{- define "vault.ui.annotations" -}} - {{- if .Values.ui.annotations }} - annotations: - {{- $tp := typeOf .Values.ui.annotations }} - {{- if eq $tp "string" }} - {{- tpl .Values.ui.annotations . 
| nindent 4 }} - {{- else }} - {{- toYaml .Values.ui.annotations | nindent 4 }} - {{- end }} - {{- end }} -{{- end -}} - -{{/* -Sets extra service account annotations -*/}} -{{- define "vault.serviceAccount.annotations" -}} - {{- if and (ne .mode "dev") .Values.server.serviceAccount.annotations }} - annotations: - {{- $tp := typeOf .Values.server.serviceAccount.annotations }} - {{- if eq $tp "string" }} - {{- tpl .Values.server.serviceAccount.annotations . | nindent 4 }} - {{- else }} - {{- toYaml .Values.server.serviceAccount.annotations | nindent 4 }} - {{- end }} - {{- end }} -{{- end -}} - -{{/* -Sets extra ingress annotations -*/}} -{{- define "vault.ingress.annotations" -}} - {{- if .Values.server.ingress.annotations }} - annotations: - {{- $tp := typeOf .Values.server.ingress.annotations }} - {{- if eq $tp "string" }} - {{- tpl .Values.server.ingress.annotations . | nindent 4 }} - {{- else }} - {{- toYaml .Values.server.ingress.annotations | nindent 4 }} - {{- end }} - {{- end }} -{{- end -}} - -{{/* -Sets extra vault server Service annotations -*/}} -{{- define "vault.service.annotations" -}} - {{- if .Values.server.service.annotations }} - {{- $tp := typeOf .Values.server.service.annotations }} - {{- if eq $tp "string" }} - {{- tpl .Values.server.service.annotations . | nindent 4 }} - {{- else }} - {{- toYaml .Values.server.service.annotations | nindent 4 }} - {{- end }} - {{- end }} -{{- end -}} - -{{/* -Set's the container resources if the user has set any. -*/}} -{{- define "vault.resources" -}} - {{- if .Values.server.resources -}} - resources: -{{ toYaml .Values.server.resources | indent 12}} - {{ end }} -{{- end -}} - -{{/* -Sets the container resources if the user has set any. -*/}} -{{- define "injector.resources" -}} - {{- if .Values.injector.resources -}} - resources: -{{ toYaml .Values.injector.resources | indent 12}} - {{ end }} -{{- end -}} - -{{/* -Inject extra environment vars in the format key:value, if populated -*/}} -{{- define "vault.extraEnvironmentVars" -}} -{{- if .extraEnvironmentVars -}} -{{- range $key, $value := .extraEnvironmentVars }} -- name: {{ printf "%s" $key | replace "." "_" | upper | quote }} - value: {{ $value | quote }} -{{- end }} -{{- end -}} -{{- end -}} - -{{/* -Inject extra environment populated by secrets, if populated -*/}} -{{- define "vault.extraSecretEnvironmentVars" -}} -{{- if .extraSecretEnvironmentVars -}} -{{- range .extraSecretEnvironmentVars }} -- name: {{ .envName }} - valueFrom: - secretKeyRef: - name: {{ .secretName }} - key: {{ .secretKey }} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* Scheme for health check and local endpoint */}} -{{- define "vault.scheme" -}} -{{- if .Values.global.tlsDisable -}} -{{ "http" }} -{{- else -}} -{{ "https" }} -{{- end -}} -{{- end -}} diff --git a/charts/vkpr/vault-helm/templates/injector-clusterrole.yaml b/charts/vkpr/vault-helm/templates/injector-clusterrole.yaml deleted file mode 100644 index 4ff25abe..00000000 --- a/charts/vkpr/vault-helm/templates/injector-clusterrole.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRole -metadata: - name: {{ template "vault.fullname" . }}-agent-injector-clusterrole - labels: - app.kubernetes.io/name: {{ include "vault.name" . 
}}-agent-injector - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -rules: -- apiGroups: ["admissionregistration.k8s.io"] - resources: ["mutatingwebhookconfigurations"] - verbs: - - "get" - - "list" - - "watch" - - "patch" -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/injector-clusterrolebinding.yaml b/charts/vkpr/vault-helm/templates/injector-clusterrolebinding.yaml deleted file mode 100644 index 9826693b..00000000 --- a/charts/vkpr/vault-helm/templates/injector-clusterrolebinding.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: ClusterRoleBinding -metadata: - name: {{ template "vault.fullname" . }}-agent-injector-binding - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ template "vault.fullname" . }}-agent-injector-clusterrole -subjects: -- kind: ServiceAccount - name: {{ template "vault.fullname" . }}-agent-injector - namespace: {{ .Release.Namespace }} -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/injector-deployment.yaml b/charts/vkpr/vault-helm/templates/injector-deployment.yaml deleted file mode 100644 index 1c5b951b..00000000 --- a/charts/vkpr/vault-helm/templates/injector-deployment.yaml +++ /dev/null @@ -1,107 +0,0 @@ -{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }} -# Deployment for the injector -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ template "vault.fullname" . }}-agent-injector - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - component: webhook -spec: - replicas: 1 - selector: - matchLabels: - app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector - app.kubernetes.io/instance: {{ .Release.Name }} - component: webhook - template: - metadata: - labels: - app.kubernetes.io/name: {{ template "vault.name" . }}-agent-injector - app.kubernetes.io/instance: {{ .Release.Name }} - component: webhook - spec: - {{ template "injector.affinity" . }} - {{ template "injector.tolerations" . }} - {{ template "injector.nodeselector" . }} - {{- if .Values.injector.priorityClassName }} - priorityClassName: {{ .Values.injector.priorityClassName }} - {{- end }} - serviceAccountName: "{{ template "vault.fullname" . }}-agent-injector" - securityContext: - runAsNonRoot: true - runAsGroup: {{ .Values.injector.gid | default 1000 }} - runAsUser: {{ .Values.injector.uid | default 100 }} - containers: - - name: sidecar-injector - {{ template "injector.resources" . }} - image: "{{ .Values.injector.image.repository }}:{{ .Values.injector.image.tag }}" - imagePullPolicy: "{{ .Values.injector.image.pullPolicy }}" - env: - - name: AGENT_INJECT_LISTEN - value: ":8080" - - name: AGENT_INJECT_LOG_LEVEL - value: {{ .Values.injector.logLevel | default "info" }} - - name: AGENT_INJECT_VAULT_ADDR - {{- if .Values.injector.externalVaultAddr }} - value: "{{ .Values.injector.externalVaultAddr }}" - {{- else }} - value: {{ include "vault.scheme" . 
}}://{{ template "vault.fullname" . }}.{{ .Release.Namespace }}.svc:{{ .Values.server.service.port }} - {{- end }} - - name: AGENT_INJECT_VAULT_AUTH_PATH - value: {{ .Values.injector.authPath }} - - name: AGENT_INJECT_VAULT_IMAGE - value: "{{ .Values.injector.agentImage.repository }}:{{ .Values.injector.agentImage.tag }}" - {{- if .Values.injector.certs.secretName }} - - name: AGENT_INJECT_TLS_CERT_FILE - value: "/etc/webhook/certs/{{ .Values.injector.certs.certName }}" - - name: AGENT_INJECT_TLS_KEY_FILE - value: "/etc/webhook/certs/{{ .Values.injector.certs.keyName }}" - {{- else }} - - name: AGENT_INJECT_TLS_AUTO - value: {{ template "vault.fullname" . }}-agent-injector-cfg - - name: AGENT_INJECT_TLS_AUTO_HOSTS - value: {{ template "vault.fullname" . }}-agent-injector-svc,{{ template "vault.fullname" . }}-agent-injector-svc.{{ .Release.Namespace }},{{ template "vault.fullname" . }}-agent-injector-svc.{{ .Release.Namespace }}.svc - {{- end }} - - name: AGENT_INJECT_LOG_FORMAT - value: {{ .Values.injector.logFormat | default "standard" }} - - name: AGENT_INJECT_REVOKE_ON_SHUTDOWN - value: "{{ .Values.injector.revokeOnShutdown | default false }}" - {{- include "vault.extraEnvironmentVars" .Values.injector | nindent 12 }} - args: - - agent-inject - - 2>&1 - livenessProbe: - httpGet: - path: /health/ready - port: 8080 - scheme: HTTPS - failureThreshold: 2 - initialDelaySeconds: 1 - periodSeconds: 2 - successThreshold: 1 - timeoutSeconds: 5 - readinessProbe: - httpGet: - path: /health/ready - port: 8080 - scheme: HTTPS - failureThreshold: 2 - initialDelaySeconds: 2 - periodSeconds: 2 - successThreshold: 1 - timeoutSeconds: 5 -{{- if .Values.injector.certs.secretName }} - volumeMounts: - - name: webhook-certs - mountPath: /etc/webhook/certs - readOnly: true - volumes: - - name: webhook-certs - secret: - secretName: "{{ .Values.injector.certs.secretName }}" -{{- end }} -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/injector-mutating-webhook.yaml b/charts/vkpr/vault-helm/templates/injector-mutating-webhook.yaml deleted file mode 100644 index 3f0d27ea..00000000 --- a/charts/vkpr/vault-helm/templates/injector-mutating-webhook.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }} -apiVersion: admissionregistration.k8s.io/v1beta1 -kind: MutatingWebhookConfiguration -metadata: - name: {{ template "vault.fullname" . }}-agent-injector-cfg - labels: - app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -webhooks: - - name: vault.hashicorp.com - clientConfig: - service: - name: {{ template "vault.fullname" . 
}}-agent-injector-svc - namespace: {{ .Release.Namespace }} - path: "/mutate" - caBundle: {{ .Values.injector.certs.caBundle }} - rules: - - operations: ["CREATE", "UPDATE"] - apiGroups: [""] - apiVersions: ["v1"] - resources: ["pods"] -{{- if .Values.injector.namespaceSelector }} - namespaceSelector: -{{ toYaml .Values.injector.namespaceSelector | indent 6}} -{{ end }} -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/injector-service.yaml b/charts/vkpr/vault-helm/templates/injector-service.yaml deleted file mode 100644 index 79d818ff..00000000 --- a/charts/vkpr/vault-helm/templates/injector-service.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }} -apiVersion: v1 -kind: Service -metadata: - name: {{ template "vault.fullname" . }}-agent-injector-svc - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - ports: - - port: 443 - targetPort: 8080 - selector: - app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector - app.kubernetes.io/instance: {{ .Release.Name }} - component: webhook -{{- end }} diff --git a/charts/vkpr/vault-helm/templates/injector-serviceaccount.yaml b/charts/vkpr/vault-helm/templates/injector-serviceaccount.yaml deleted file mode 100644 index a28d38fa..00000000 --- a/charts/vkpr/vault-helm/templates/injector-serviceaccount.yaml +++ /dev/null @@ -1,11 +0,0 @@ -{{- if and (eq (.Values.injector.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "vault.fullname" . }}-agent-injector - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ include "vault.name" . }}-agent-injector - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/server-clusterrolebinding.yaml b/charts/vkpr/vault-helm/templates/server-clusterrolebinding.yaml deleted file mode 100644 index 733764f9..00000000 --- a/charts/vkpr/vault-helm/templates/server-clusterrolebinding.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (ne .mode "") (and (eq (.Values.global.enabled | toString) "true") (eq (.Values.server.authDelegator.enabled | toString) "true")) }} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: ClusterRoleBinding -metadata: - name: {{ template "vault.fullname" . }}-server-binding - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: system:auth-delegator -subjects: -- kind: ServiceAccount - name: {{ template "vault.fullname" . }} - namespace: {{ .Release.Namespace }} -{{ end }} -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/server-config-configmap.yaml b/charts/vkpr/vault-helm/templates/server-config-configmap.yaml deleted file mode 100644 index b8093ad0..00000000 --- a/charts/vkpr/vault-helm/templates/server-config-configmap.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{ template "vault.mode" . 
}} -{{- if ne .mode "external" }} -{{- if and (eq (.Values.global.enabled | toString) "true") (ne .mode "dev") -}} -{{ if or (.Values.server.standalone.config) (.Values.server.ha.config) -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "vault.fullname" . }}-config - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -data: - extraconfig-from-values.hcl: |- - {{- if or (eq .mode "ha") (eq .mode "standalone") }} - {{- $type := typeOf (index .Values.server .mode).config }} - {{- if eq $type "string" }} - disable_mlock = true - {{- if eq .mode "standalone" }} - {{ tpl .Values.server.standalone.config . | nindent 4 | trim }} - {{- else if and (eq .mode "ha") (eq (.Values.server.ha.raft.enabled | toString) "false") }} - {{ tpl .Values.server.ha.config . | nindent 4 | trim }} - {{- else if and (eq .mode "ha") (eq (.Values.server.ha.raft.enabled | toString) "true") }} - {{ tpl .Values.server.ha.raft.config . | nindent 4 | trim }} - {{ end }} - {{- else }} - {{- if and (eq .mode "ha") (eq (.Values.server.ha.raft.enabled | toString) "true") }} -{{ merge (dict "disable_mlock" true) (index .Values.server .mode).raft.config | toPrettyJson | indent 4 }} - {{- else }} -{{ merge (dict "disable_mlock" true) (index .Values.server .mode).config | toPrettyJson | indent 4 }} - {{- end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/vkpr/vault-helm/templates/server-discovery-role.yaml b/charts/vkpr/vault-helm/templates/server-discovery-role.yaml deleted file mode 100644 index 4a39cec2..00000000 --- a/charts/vkpr/vault-helm/templates/server-discovery-role.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (eq .mode "ha" ) (eq (.Values.global.enabled | toString) "true") }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - namespace: {{ .Release.Namespace }} - name: {{ template "vault.fullname" . }}-discovery-role - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -rules: -- apiGroups: [""] - resources: ["pods"] - verbs: ["get", "watch", "list", "update", "patch"] -{{ end }} -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/server-discovery-rolebinding.yaml b/charts/vkpr/vault-helm/templates/server-discovery-rolebinding.yaml deleted file mode 100644 index f9494b47..00000000 --- a/charts/vkpr/vault-helm/templates/server-discovery-rolebinding.yaml +++ /dev/null @@ -1,23 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (eq .mode "ha" ) (eq (.Values.global.enabled | toString) "true") }} -apiVersion: rbac.authorization.k8s.io/v1beta1 -kind: RoleBinding -metadata: - name: {{ template "vault.fullname" . }}-discovery-rolebinding - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ template "vault.fullname" . }}-discovery-role -subjects: -- kind: ServiceAccount - name: {{ template "vault.fullname" . 
}} - namespace: {{ .Release.Namespace }} -{{ end }} -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/server-disruptionbudget.yaml b/charts/vkpr/vault-helm/templates/server-disruptionbudget.yaml deleted file mode 100644 index 6d7f8240..00000000 --- a/charts/vkpr/vault-helm/templates/server-disruptionbudget.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" -}} -{{- if and (and (eq (.Values.global.enabled | toString) "true") (eq .mode "ha")) (eq (.Values.server.ha.disruptionBudget.enabled | toString) "true") -}} -# PodDisruptionBudget to prevent degrading the server cluster through -# voluntary cluster changes. -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - name: {{ template "vault.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - maxUnavailable: {{ template "vault.pdb.maxUnavailable" . }} - selector: - matchLabels: - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - component: server -{{- end -}} -{{- end -}} diff --git a/charts/vkpr/vault-helm/templates/server-ha-active-service.yaml b/charts/vkpr/vault-helm/templates/server-ha-active-service.yaml deleted file mode 100644 index 01f962d4..00000000 --- a/charts/vkpr/vault-helm/templates/server-ha-active-service.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (eq .mode "ha" ) (and (eq (.Values.server.service.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true")) }} -# Service for active Vault pod -apiVersion: v1 -kind: Service -metadata: - name: {{ template "vault.fullname" . }}-active - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - annotations: -{{ template "vault.service.annotations" .}} -spec: - type: ClusterIP - publishNotReadyAddresses: true - ports: - - name: http - port: 8200 - targetPort: 8200 - - name: internal - port: 8201 - targetPort: 8201 - selector: - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - component: server - vault-active: "true" -{{- end }} -{{- end }} diff --git a/charts/vkpr/vault-helm/templates/server-ha-standby-service.yaml b/charts/vkpr/vault-helm/templates/server-ha-standby-service.yaml deleted file mode 100644 index 302627a4..00000000 --- a/charts/vkpr/vault-helm/templates/server-ha-standby-service.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (eq .mode "ha" ) (and (eq (.Values.server.service.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true")) }} -# Service for active Vault pod -apiVersion: v1 -kind: Service -metadata: - name: {{ template "vault.fullname" . }}-standby - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . 
}} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - annotations: -{{ template "vault.service.annotations" .}} -spec: - type: ClusterIP - publishNotReadyAddresses: true - ports: - - name: http - port: 8200 - targetPort: 8200 - - name: internal - port: 8201 - targetPort: 8201 - selector: - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - component: server - vault-active: "false" -{{- end }} -{{- end }} diff --git a/charts/vkpr/vault-helm/templates/server-headless-service.yaml b/charts/vkpr/vault-helm/templates/server-headless-service.yaml deleted file mode 100644 index 4bb276b1..00000000 --- a/charts/vkpr/vault-helm/templates/server-headless-service.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (eq (.Values.server.service.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }} -# Service for Vault cluster -apiVersion: v1 -kind: Service -metadata: - name: {{ template "vault.fullname" . }}-internal - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - annotations: - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" -{{ template "vault.service.annotations" .}} -spec: - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: "{{ include "vault.scheme" . }}" - port: {{ .Values.server.service.port }} - targetPort: {{ .Values.server.service.targetPort }} - - name: https-internal - port: 8201 - targetPort: 8201 - selector: - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - component: server -{{- end }} -{{- end }} diff --git a/charts/vkpr/vault-helm/templates/server-ingress.yaml b/charts/vkpr/vault-helm/templates/server-ingress.yaml deleted file mode 100644 index fd9662da..00000000 --- a/charts/vkpr/vault-helm/templates/server-ingress.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if .Values.server.ingress.enabled -}} -{{- $serviceName := include "vault.fullname" . -}} -{{- $servicePort := .Values.server.service.port -}} -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: {{ template "vault.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- with .Values.server.ingress.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- template "vault.ingress.annotations" . }} -spec: -{{- if .Values.server.ingress.tls }} - tls: - {{- range .Values.server.ingress.tls }} - - hosts: - {{- range .hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .secretName }} - {{- end }} -{{- end }} - rules: - {{- range .Values.server.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range (.paths | default (list "/")) }} - - path: {{ . 
}} - backend: - serviceName: {{ $serviceName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} diff --git a/charts/vkpr/vault-helm/templates/server-service.yaml b/charts/vkpr/vault-helm/templates/server-service.yaml deleted file mode 100644 index 6d505842..00000000 --- a/charts/vkpr/vault-helm/templates/server-service.yaml +++ /dev/null @@ -1,46 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (eq (.Values.server.service.enabled | toString) "true" ) (eq (.Values.global.enabled | toString) "true") }} -# Service for Vault cluster -apiVersion: v1 -kind: Service -metadata: - name: {{ template "vault.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - annotations: - # This must be set in addition to publishNotReadyAddresses due - # to an open issue where it may not work: - # https://github.com/kubernetes/kubernetes/issues/58662 - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" -{{ template "vault.service.annotations" .}} -spec: - {{- if .Values.server.service.type}} - type: {{ .Values.server.service.type }} - {{- end}} - {{- if .Values.server.service.clusterIP }} - clusterIP: {{ .Values.server.service.clusterIP }} - {{- end }} - # We want the servers to become available even if they're not ready - # since this DNS is also used for join operations. - publishNotReadyAddresses: true - ports: - - name: {{ include "vault.scheme" . }} - port: {{ .Values.server.service.port }} - targetPort: {{ .Values.server.service.targetPort }} - {{- if and (.Values.server.service.nodePort) (eq (.Values.server.service.type | toString) "NodePort") }} - nodePort: {{ .Values.server.service.nodePort }} - {{- end }} - - name: https-internal - port: 8201 - targetPort: 8201 - selector: - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - component: server -{{- end }} -{{- end }} diff --git a/charts/vkpr/vault-helm/templates/server-serviceaccount.yaml b/charts/vkpr/vault-helm/templates/server-serviceaccount.yaml deleted file mode 100644 index b3751829..00000000 --- a/charts/vkpr/vault-helm/templates/server-serviceaccount.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (ne .mode "") (eq (.Values.global.enabled | toString) "true") }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ template "vault.fullname" . }} - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{ template "vault.serviceAccount.annotations" . }} -{{ end }} -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/server-statefulset.yaml b/charts/vkpr/vault-helm/templates/server-statefulset.yaml deleted file mode 100644 index 3f407096..00000000 --- a/charts/vkpr/vault-helm/templates/server-statefulset.yaml +++ /dev/null @@ -1,167 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (ne .mode "") (eq (.Values.global.enabled | toString) "true") }} -# StatefulSet to run the actual vault server cluster. -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ template "vault.fullname" . 
}} - namespace: {{ .Release.Namespace }} - labels: - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - serviceName: {{ template "vault.fullname" . }}-internal - podManagementPolicy: Parallel - replicas: {{ template "vault.replicas" . }} - updateStrategy: - type: {{ .Values.server.updateStrategyType }} - selector: - matchLabels: - app.kubernetes.io/name: {{ template "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - component: server - template: - metadata: - labels: - helm.sh/chart: {{ template "vault.chart" . }} - app.kubernetes.io/name: {{ template "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - component: server - {{- if .Values.server.extraLabels -}} - {{- toYaml .Values.server.extraLabels | nindent 8 -}} - {{- end -}} - {{ template "vault.annotations" . }} - spec: - {{ template "vault.affinity" . }} - {{ template "vault.tolerations" . }} - {{ template "vault.nodeselector" . }} - {{- if .Values.server.priorityClassName }} - priorityClassName: {{ .Values.server.priorityClassName }} - {{- end }} - terminationGracePeriodSeconds: 10 - serviceAccountName: {{ template "vault.fullname" . }} - {{ if .Values.server.shareProcessNamespace }} - shareProcessNamespace: true - {{ end }} - securityContext: - runAsNonRoot: true - runAsGroup: {{ .Values.server.gid | default 1000 }} - runAsUser: {{ .Values.server.uid | default 100 }} - fsGroup: {{ .Values.server.gid | default 1000 }} - volumes: - {{ template "vault.volumes" . }} - containers: - - name: vault - {{ template "vault.resources" . }} - image: {{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default "latest" }} - imagePullPolicy: {{ .Values.server.image.pullPolicy }} - command: {{ template "vault.command" . }} - args: {{ template "vault.args" . }} - env: - - name: HOST_IP - valueFrom: - fieldRef: - fieldPath: status.hostIP - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - - name: VAULT_K8S_POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: VAULT_K8S_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: VAULT_ADDR - value: "{{ include "vault.scheme" . }}://127.0.0.1:8200" - - name: VAULT_API_ADDR - value: "{{ include "vault.scheme" . }}://$(POD_IP):8200" - - name: SKIP_CHOWN - value: "true" - - name: SKIP_SETCAP - value: "true" - - name: HOSTNAME - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: VAULT_CLUSTER_ADDR - value: "https://$(HOSTNAME).{{ template "vault.fullname" . }}-internal:8201" - {{- if and (eq (.Values.server.ha.raft.enabled | toString) "true") (eq (.Values.server.ha.raft.setNodeId | toString) "true") }} - - name: VAULT_RAFT_NODE_ID - valueFrom: - fieldRef: - fieldPath: metadata.name - {{- end }} - {{ template "vault.envs" . }} - {{- include "vault.extraEnvironmentVars" .Values.server | nindent 12 }} - {{- include "vault.extraSecretEnvironmentVars" .Values.server | nindent 12 }} - volumeMounts: - {{ template "vault.mounts" . }} - ports: - - containerPort: 8200 - name: {{ include "vault.scheme" . }} - - containerPort: 8201 - name: https-internal - - containerPort: 8202 - name: {{ include "vault.scheme" . }}-rep - {{- if .Values.server.readinessProbe.enabled }} - readinessProbe: - {{- if .Values.server.readinessProbe.path }} - httpGet: - path: {{ .Values.server.readinessProbe.path | quote }} - port: 8200 - scheme: {{ include "vault.scheme" . 
| upper }} - {{- else }} - # Check status; unsealed vault servers return 0 - # The exit code reflects the seal status: - # 0 - unsealed - # 1 - error - # 2 - sealed - exec: - command: ["/bin/sh", "-ec", "vault status -tls-skip-verify"] - {{- end }} - failureThreshold: 2 - initialDelaySeconds: 5 - periodSeconds: 3 - successThreshold: 1 - timeoutSeconds: 5 - {{- end }} - {{- if .Values.server.livenessProbe.enabled }} - livenessProbe: - httpGet: - path: {{ .Values.server.livenessProbe.path | quote }} - port: 8200 - scheme: {{ include "vault.scheme" . | upper }} - initialDelaySeconds: {{ .Values.server.livenessProbe.initialDelaySeconds }} - periodSeconds: 3 - successThreshold: 1 - timeoutSeconds: 5 - {{- end }} - lifecycle: - # Vault container doesn't receive SIGTERM from Kubernetes - # and after the grace period ends, Kube sends SIGKILL. This - # causes issues with graceful shutdowns such as deregistering itself - # from Consul (zombie services). - preStop: - exec: - command: [ - "/bin/sh", "-c", - # Adding a sleep here to give the pod eviction a - # chance to propagate, so requests will not be made - # to this pod while it's terminating - "sleep {{ .Values.server.preStopSleepSeconds }} && kill -SIGTERM $(pidof vault)", - ] - {{- if .Values.server.extraContainers }} - {{ toYaml .Values.server.extraContainers | nindent 8}} - {{- end }} - {{- if .Values.global.imagePullSecrets }} - imagePullSecrets: - {{- toYaml .Values.global.imagePullSecrets | nindent 8 }} - {{- end }} - {{ template "vault.volumeclaims" . }} -{{ end }} -{{ end }} diff --git a/charts/vkpr/vault-helm/templates/ui-service.yaml b/charts/vkpr/vault-helm/templates/ui-service.yaml deleted file mode 100644 index 8b8a2c92..00000000 --- a/charts/vkpr/vault-helm/templates/ui-service.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{ template "vault.mode" . }} -{{- if ne .mode "external" }} -{{- if and (ne .mode "") (eq (.Values.global.enabled | toString) "true") }} -{{- if eq (.Values.ui.enabled | toString) "true" }} -# Headless service for Vault server DNS entries. This service should only -# point to Vault servers. For access to an agent, one should assume that -# the agent is installed locally on the node and the NODE_IP should be used. -# If the node can't run a Vault agent, then this service can be used to -# communicate directly to a server agent. -apiVersion: v1 -kind: Service -metadata: - name: {{ template "vault.fullname" . }}-ui - namespace: {{ .Release.Namespace }} - labels: - helm.sh/chart: {{ include "vault.chart" . }} - app.kubernetes.io/name: {{ include "vault.name" . }}-ui - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- template "vault.ui.annotations" . }} -spec: - selector: - app.kubernetes.io/name: {{ include "vault.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - component: server - publishNotReadyAddresses: true - ports: - - name: {{ include "vault.scheme" . 
}} - port: {{ .Values.ui.externalPort }} - targetPort: 8200 - {{- if .Values.ui.serviceNodePort }} - nodePort: {{ .Values.ui.serviceNodePort }} - {{- end }} - type: {{ .Values.ui.serviceType }} - {{- if and (eq (.Values.ui.serviceType | toString) "LoadBalancer") (.Values.ui.loadBalancerSourceRanges) }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.ui.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} - {{- end }} - {{- if and (eq (.Values.ui.serviceType | toString) "LoadBalancer") (.Values.ui.loadBalancerIP) }} - loadBalancerIP: {{ .Values.ui.loadBalancerIP }} - {{- end }} -{{- end -}} - -{{ end }} -{{ end }} diff --git a/charts/vkpr/vault-helm/test/acceptance/_helpers.bash b/charts/vkpr/vault-helm/test/acceptance/_helpers.bash deleted file mode 100644 index 031daf5d..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/_helpers.bash +++ /dev/null @@ -1,159 +0,0 @@ -# name_prefix returns the prefix of the resources within Kubernetes. -name_prefix() { - printf "vault" -} - -# chart_dir returns the directory for the chart -chart_dir() { - echo ${BATS_TEST_DIRNAME}/../.. -} - -# helm_install installs the vault chart. This will source overridable -# values from the "values.yaml" file in this directory. This can be set -# by CI or other environments to do test-specific overrides. Note that its -# easily possible to break tests this way so be careful. -helm_install() { - local values="${BATS_TEST_DIRNAME}/values.yaml" - if [ ! -f "${values}" ]; then - touch $values - fi - - helm install -f ${values} \ - --name vault \ - ${BATS_TEST_DIRNAME}/../.. -} - -# helm_install_ha installs the vault chart using HA mode. This will source -# overridable values from the "values.yaml" file in this directory. This can be -# set by CI or other environments to do test-specific overrides. Note that its -# easily possible to break tests this way so be careful. -helm_install_ha() { - local values="${BATS_TEST_DIRNAME}/values.yaml" - if [ ! -f "${values}" ]; then - touch $values - fi - - helm install -f ${values} \ - --name vault \ - --set 'server.enabled=false' \ - --set 'serverHA.enabled=true' \ - ${BATS_TEST_DIRNAME}/../.. -} - -# wait for consul to be running -wait_for_running_consul() { - check() { - # This requests the pod and checks whether the status is running - # and the ready state is true. If so, it outputs the name. Otherwise - # it outputs empty. Therefore, to check for success, check for nonzero - # string length. - kubectl get pods -l component=client -o json | \ - jq -r '.items[0] | select( - .status.phase == "Running" and - ([ .status.conditions[] | select(.type == "Ready" and .status == "True") ] | length) == 1 - ) | .metadata.name' - } - - for i in $(seq 60); do - if [ -n "$(check ${POD_NAME})" ]; then - echo "consul clients are ready." - return - fi - - echo "Waiting for ${POD_NAME} to be ready..." - sleep 2 - done - - echo "consul clients never became ready." - exit 1 -} - -# wait for a pod to be ready -wait_for_running() { - POD_NAME=$1 - - check() { - # This requests the pod and checks whether the status is running - # and the ready state is true. If so, it outputs the name. Otherwise - # it outputs empty. Therefore, to check for success, check for nonzero - # string length. 
- kubectl get pods $1 -o json | \ - jq -r 'select( - .status.phase == "Running" and - ([ .status.conditions[] | select(.type == "Ready" and .status == "False") ] | length) == 1 - ) | .metadata.namespace + "/" + .metadata.name' - } - - for i in $(seq 60); do - if [ -n "$(check ${POD_NAME})" ]; then - echo "${POD_NAME} is ready." - sleep 5 - return - fi - - echo "Waiting for ${POD_NAME} to be ready..." - sleep 2 - done - - echo "${POD_NAME} never became ready." - exit 1 -} - -wait_for_ready() { - POD_NAME=$1 - - check() { - # This requests the pod and checks whether the status is running - # and the ready state is true. If so, it outputs the name. Otherwise - # it outputs empty. Therefore, to check for success, check for nonzero - # string length. - kubectl get pods $1 -o json | \ - jq -r 'select( - .status.phase == "Running" and - ([ .status.conditions[] | select(.type == "Ready" and .status == "True") ] | length) == 1 - ) | .metadata.namespace + "/" + .metadata.name' - } - - for i in $(seq 60); do - if [ -n "$(check ${POD_NAME})" ]; then - echo "${POD_NAME} is ready." - sleep 5 - return - fi - - echo "Waiting for ${POD_NAME} to be ready..." - sleep 2 - done - - echo "${POD_NAME} never became ready." - exit 1 -} - -wait_for_complete_job() { - POD_NAME=$1 - - check() { - # This requests the pod and checks whether the status is running - # and the ready state is true. If so, it outputs the name. Otherwise - # it outputs empty. Therefore, to check for success, check for nonzero - # string length. - kubectl get job $1 -o json | \ - jq -r 'select( - .status.succeeded == 1 - ) | .metadata.namespace + "/" + .metadata.name' - } - - for i in $(seq 60); do - if [ -n "$(check ${POD_NAME})" ]; then - echo "${POD_NAME} is complete." - sleep 5 - return - fi - - echo "Waiting for ${POD_NAME} to be complete..." - sleep 2 - done - - echo "${POD_NAME} never completed." 
- exit 1 -} diff --git a/charts/vkpr/vault-helm/test/acceptance/injector-test/bootstrap.sh b/charts/vkpr/vault-helm/test/acceptance/injector-test/bootstrap.sh deleted file mode 100755 index d738fd28..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/injector-test/bootstrap.sh +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh - -OUTPUT=/tmp/output.txt - -vault operator init -n 1 -t 1 >> ${OUTPUT?} - -unseal=$(cat ${OUTPUT?} | grep "Unseal Key 1:" | sed -e "s/Unseal Key 1: //g") -root=$(cat ${OUTPUT?} | grep "Initial Root Token:" | sed -e "s/Initial Root Token: //g") - -vault operator unseal ${unseal?} - -vault login -no-print ${root?} - -vault policy write db-backup /vault/userconfig/test/pgdump-policy.hcl - -vault auth enable kubernetes - -vault write auth/kubernetes/config \ - token_reviewer_jwt="$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)" \ - kubernetes_host=https://${KUBERNETES_PORT_443_TCP_ADDR}:443 \ - kubernetes_ca_cert=@/var/run/secrets/kubernetes.io/serviceaccount/ca.crt - -vault write auth/kubernetes/role/db-backup \ - bound_service_account_names=pgdump \ - bound_service_account_namespaces=acceptance \ - policies=db-backup \ - ttl=1h - -vault secrets enable database - -vault write database/config/postgresql \ - plugin_name=postgresql-database-plugin \ - allowed_roles="db-backup" \ - connection_url="postgresql://{{username}}:{{password}}@postgres:5432/mydb?sslmode=disable" \ - username="vault" \ - password="vault" - -vault write database/roles/db-backup \ - db_name=postgresql \ - creation_statements="CREATE ROLE \"{{name}}\" WITH LOGIN PASSWORD '{{password}}' VALID UNTIL '{{expiration}}'; \ - GRANT CONNECT ON DATABASE mydb TO \"{{name}}\"; \ - GRANT USAGE ON SCHEMA app TO \"{{name}}\"; \ - GRANT SELECT ON ALL TABLES IN SCHEMA app TO \"{{name}}\";" \ - revocation_statements="ALTER ROLE \"{{name}}\" NOLOGIN;"\ - default_ttl="1h" \ - max_ttl="24h" diff --git a/charts/vkpr/vault-helm/test/acceptance/injector-test/job.yaml b/charts/vkpr/vault-helm/test/acceptance/injector-test/job.yaml deleted file mode 100644 index d665383c..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/injector-test/job.yaml +++ /dev/null @@ -1,39 +0,0 @@ ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: pgdump - labels: - app: pgdump ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: pgdump -spec: - backoffLimit: 0 - template: - metadata: - name: pgdump - labels: - app: pgdump - annotations: - vault.hashicorp.com/agent-inject: "true" - vault.hashicorp.com/agent-inject-secret-db-creds: "database/creds/db-backup" - vault.hashicorp.com/agent-inject-template-db-creds: | - {{- with secret "database/creds/db-backup" -}} - postgresql://{{ .Data.username }}:{{ .Data.password }}@postgres.acceptance.svc.cluster.local:5432/mydb - {{- end }} - vault.hashicorp.com/role: "db-backup" - vault.hashicorp.com/agent-pre-populate-only: "true" - spec: - serviceAccountName: pgdump - containers: - - name: pgdump - image: postgres:11.5 - command: - - "/bin/sh" - - "-ec" - args: - - "/usr/bin/pg_dump $(cat /vault/secrets/db-creds) --no-owner > /dev/stdout" - restartPolicy: Never diff --git a/charts/vkpr/vault-helm/test/acceptance/injector-test/pg-deployment.yaml b/charts/vkpr/vault-helm/test/acceptance/injector-test/pg-deployment.yaml deleted file mode 100644 index 13389ffd..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/injector-test/pg-deployment.yaml +++ /dev/null @@ -1,69 +0,0 @@ ---- -apiVersion: v1 -kind: Service -metadata: - name: postgres - labels: - app: postgres -spec: - type: ClusterIP - 
ports: - - port: 5432 - targetPort: 5432 - selector: - app: postgres ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: postgres -spec: - replicas: 1 - selector: - matchLabels: - app: postgres - template: - metadata: - labels: - service: postgres - app: postgres - spec: - containers: - - name: postgres - image: postgres:11.5 - ports: - - containerPort: 5432 - env: - - name: POSTGRES_DB - value: mydb - - name: POSTGRES_USER - value: postgres - - name: POSTGRES_PASSWORD - value: password - volumeMounts: - - mountPath: "/var/lib/postgresql/data" - name: "pgdata" - - mountPath: "/docker-entrypoint-initdb.d" - name: "pgconf" - volumes: - - name: pgdata - emptyDir: {} - - name: pgconf - configMap: - name: "pg-init" ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: pg-init - labels: - app: postgres -data: - setup.sql: | - CREATE ROLE vault; - ALTER ROLE vault WITH SUPERUSER LOGIN PASSWORD 'vault'; - - \c mydb - CREATE SCHEMA app; - CREATE TABLE app.inventory(id int); - INSERT INTO app.inventory(id) VALUES (0); diff --git a/charts/vkpr/vault-helm/test/acceptance/injector-test/pgdump-policy.hcl b/charts/vkpr/vault-helm/test/acceptance/injector-test/pgdump-policy.hcl deleted file mode 100644 index 88a6cd66..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/injector-test/pgdump-policy.hcl +++ /dev/null @@ -1,3 +0,0 @@ -path "database/creds/db-backup" { - capabilities = ["read"] -} diff --git a/charts/vkpr/vault-helm/test/acceptance/injector.bats b/charts/vkpr/vault-helm/test/acceptance/injector.bats deleted file mode 100644 index e7fb393a..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/injector.bats +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "injector: testing deployment" { - cd `chart_dir` - - kubectl delete namespace acceptance --ignore-not-found=true - kubectl create namespace acceptance - kubectl config set-context --current --namespace=acceptance - - kubectl create -f ./test/acceptance/injector-test/pg-deployment.yaml - sleep 5 - wait_for_ready $(kubectl get pod -l app=postgres -o jsonpath="{.items[0].metadata.name}") - - kubectl create secret generic test \ - --from-file ./test/acceptance/injector-test/pgdump-policy.hcl \ - --from-file ./test/acceptance/injector-test/bootstrap.sh - - kubectl label secret test app=vault-agent-demo - - helm install "$(name_prefix)" \ - --set="server.extraVolumes[0].type=secret" \ - --set="server.extraVolumes[0].name=test" . 
- wait_for_running $(name_prefix)-0 - - wait_for_ready $(kubectl get pod -l component=webhook -o jsonpath="{.items[0].metadata.name}") - - kubectl exec -ti "$(name_prefix)-0" -- /bin/sh -c "cp /vault/userconfig/test/bootstrap.sh /tmp/bootstrap.sh && chmod +x /tmp/bootstrap.sh && /tmp/bootstrap.sh" - sleep 5 - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "false" ] - - local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "true" ] - - - kubectl create -f ./test/acceptance/injector-test/job.yaml - wait_for_complete_job "pgdump" -} - -# Clean up -teardown() { - if [[ ${CLEANUP:-true} == "true" ]] - then - echo "helm/pvc teardown" - helm delete vault - kubectl delete --all pvc - kubectl delete secret test - kubectl delete job pgdump - kubectl delete deployment postgres - kubectl delete namespace acceptance - fi -} diff --git a/charts/vkpr/vault-helm/test/acceptance/server-annotations.bats b/charts/vkpr/vault-helm/test/acceptance/server-annotations.bats deleted file mode 100644 index d382788a..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/server-annotations.bats +++ /dev/null @@ -1,46 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/annotations: testing yaml and yaml-formatted string formats" { - cd `chart_dir` - kubectl delete namespace acceptance --ignore-not-found=true - kubectl create namespace acceptance - kubectl config set-context --current --namespace=acceptance - - helm install "$(name_prefix)" -f ./test/acceptance/server-test/annotations-overrides.yaml . - wait_for_running $(name_prefix)-0 - - # service annotations - local awesome=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.metadata.annotations.active') - [ "${awesome}" == "sometimes" ] - - local pickMe=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.metadata.annotations.pickMe') - [ "${pickMe}" == "please" ] - - local environment=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.metadata.annotations.environment') - [ "${environment}" == "production" ] - - local milk=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.metadata.annotations.milk') - [ "${milk}" == "oat" ] - - local myName=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.metadata.annotations.myName') - [ "${myName}" == "$(name_prefix)" ] - -} - -# Clean up -teardown() { - if [[ ${CLEANUP:-true} == "true" ]] - then - echo "helm/pvc teardown" - helm delete $(name_prefix) - kubectl delete --all pvc - kubectl delete namespace acceptance --ignore-not-found=true - fi -} diff --git a/charts/vkpr/vault-helm/test/acceptance/server-dev.bats b/charts/vkpr/vault-helm/test/acceptance/server-dev.bats deleted file mode 100644 index ffda9464..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/server-dev.bats +++ /dev/null @@ -1,64 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/dev: testing deployment" { - cd `chart_dir` - kubectl delete namespace acceptance --ignore-not-found=true - kubectl create namespace acceptance - kubectl config set-context --current --namespace=acceptance - - helm install "$(name_prefix)" --set='server.dev.enabled=true' . 
- wait_for_running $(name_prefix)-0 - - # Replicas - local replicas=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.replicas') - [ "${replicas}" == "1" ] - - # Volume Mounts - local volumeCount=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.containers[0].volumeMounts | length') - [ "${volumeCount}" == "0" ] - - # Service - local service=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.clusterIP') - [ "${service}" != "None" ] - - local service=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.type') - [ "${service}" == "ClusterIP" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports | length') - [ "${ports}" == "2" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports[0].port') - [ "${ports}" == "8200" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports[1].port') - [ "${ports}" == "8201" ] - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "false" ] - - local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "true" ] -} - -# Clean up -teardown() { - if [[ ${CLEANUP:-true} == "true" ]] - then - echo "helm/pvc teardown" - helm delete vault - kubectl delete --all pvc - kubectl delete namespace acceptance --ignore-not-found=true - fi -} diff --git a/charts/vkpr/vault-helm/test/acceptance/server-ha-enterprise-dr.bats b/charts/vkpr/vault-helm/test/acceptance/server-ha-enterprise-dr.bats deleted file mode 100644 index 35348e3c..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/server-ha-enterprise-dr.bats +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ha-enterprise-raft: testing DR deployment" { - cd `chart_dir` - - helm install "$(name_prefix)-east" \ - --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.4.0_ent' \ - --set='injector.enabled=false' \ - --set='server.ha.enabled=true' \ - --set='server.ha.raft.enabled=true' . 
- wait_for_running "$(name_prefix)-east-0" - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "true" ] - - local init_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "false" ] - - # Vault Init - local init=$(kubectl exec -ti "$(name_prefix)-east-0" -- \ - vault operator init -format=json -n 1 -t 1) - - local primary_token=$(echo ${init} | jq -r '.unseal_keys_b64[0]') - [ "${primary_token}" != "" ] - - local primary_root=$(echo ${init} | jq -r '.root_token') - [ "${primary_root}" != "" ] - - kubectl exec -ti "$(name_prefix)-east-0" -- vault operator unseal ${primary_token} - wait_for_ready "$(name_prefix)-east-0" - - sleep 10 - - # Vault Unseal - local pods=($(kubectl get pods --selector='app.kubernetes.io/name=vault' -o json | jq -r '.items[].metadata.name')) - for pod in "${pods[@]}" - do - if [[ ${pod?} != "$(name_prefix)-east-0" ]] - then - kubectl exec -ti ${pod} -- vault operator raft join http://$(name_prefix)-east-0.$(name_prefix)-east-internal:8200 - kubectl exec -ti ${pod} -- vault operator unseal ${primary_token} - wait_for_ready "${pod}" - fi - done - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "false" ] - - local init_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "true" ] - - kubectl exec "$(name_prefix)-east-0" -- vault login ${primary_root} - - local raft_status=$(kubectl exec "$(name_prefix)-east-0" -- vault operator raft list-peers -format=json | - jq -r '.data.config.servers | length') - [ "${raft_status}" == "3" ] - - kubectl exec -ti $(name_prefix)-east-0 -- vault write -f sys/replication/dr/primary/enable primary_cluster_addr=https://$(name_prefix)-east-active:8201 - - local secondary=$(kubectl exec -ti "$(name_prefix)-east-0" -- vault write sys/replication/dr/primary/secondary-token id=secondary -format=json) - [ "${secondary}" != "" ] - - local secondary_replica_token=$(echo ${secondary} | jq -r '.wrap_info.token') - [ "${secondary_replica_token}" != "" ] - - # Install vault-west - helm install "$(name_prefix)-west" \ - --set='injector.enabled=false' \ - --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.4.0_ent' \ - --set='server.ha.enabled=true' \ - --set='server.ha.raft.enabled=true' . 
- wait_for_running "$(name_prefix)-west-0" - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "true" ] - - local init_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "false" ] - - # Vault Init - local init=$(kubectl exec -ti "$(name_prefix)-west-0" -- \ - vault operator init -format=json -n 1 -t 1) - - local secondary_token=$(echo ${init} | jq -r '.unseal_keys_b64[0]') - [ "${secondary_token}" != "" ] - - local secondary_root=$(echo ${init} | jq -r '.root_token') - [ "${secondary_root}" != "" ] - - kubectl exec -ti "$(name_prefix)-west-0" -- vault operator unseal ${secondary_token} - wait_for_ready "$(name_prefix)-west-0" - - sleep 10 - - # Vault Unseal - local pods=($(kubectl get pods --selector='app.kubernetes.io/instance=vault-west' -o json | jq -r '.items[].metadata.name')) - for pod in "${pods[@]}" - do - if [[ ${pod?} != "$(name_prefix)-west-0" ]] - then - kubectl exec -ti ${pod} -- vault operator raft join http://$(name_prefix)-west-0.$(name_prefix)-west-internal:8200 - kubectl exec -ti ${pod} -- vault operator unseal ${secondary_token} - wait_for_ready "${pod}" - fi - done - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "false" ] - - local init_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "true" ] - - kubectl exec "$(name_prefix)-west-0" -- vault login ${secondary_root} - - local raft_status=$(kubectl exec "$(name_prefix)-west-0" -- vault operator raft list-peers -format=json | - jq -r '.data.config.servers | length') - [ "${raft_status}" == "3" ] - - kubectl exec -ti "$(name_prefix)-west-0" -- vault write sys/replication/dr/secondary/enable token=${secondary_replica_token} - - sleep 10 - - local pods=($(kubectl get pods --selector='app.kubernetes.io/instance=vault-west' -o json | jq -r '.items[].metadata.name')) - for pod in "${pods[@]}" - do - if [[ ${pod?} != "$(name_prefix)-west-0" ]] - then - kubectl delete pod "${pod?}" - wait_for_running "${pod?}" - kubectl exec -ti ${pod} -- vault operator unseal ${primary_token} - wait_for_ready "${pod}" - fi - done -} - -setup() { - kubectl delete namespace acceptance --ignore-not-found=true - kubectl create namespace acceptance - kubectl config set-context --current --namespace=acceptance -} - -#cleanup -teardown() { - if [[ ${CLEANUP:-true} == "true" ]] - then - helm delete vault-east - helm delete vault-west - kubectl delete --all pvc - kubectl delete namespace acceptance --ignore-not-found=true - fi -} diff --git a/charts/vkpr/vault-helm/test/acceptance/server-ha-enterprise-perf.bats b/charts/vkpr/vault-helm/test/acceptance/server-ha-enterprise-perf.bats deleted file mode 100644 index 6543663e..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/server-ha-enterprise-perf.bats +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ha-enterprise-raft: testing performance replica deployment" { - cd `chart_dir` - - helm install "$(name_prefix)-east" \ - --set='injector.enabled=false' \ - --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.4.0_ent' \ - --set='server.ha.enabled=true' \ - --set='server.ha.raft.enabled=true' . 
- wait_for_running "$(name_prefix)-east-0" - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "true" ] - - local init_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "false" ] - - # Vault Init - local init=$(kubectl exec -ti "$(name_prefix)-east-0" -- \ - vault operator init -format=json -n 1 -t 1) - - local primary_token=$(echo ${init} | jq -r '.unseal_keys_b64[0]') - [ "${primary_token}" != "" ] - - local primary_root=$(echo ${init} | jq -r '.root_token') - [ "${primary_root}" != "" ] - - kubectl exec -ti "$(name_prefix)-east-0" -- vault operator unseal ${primary_token} - wait_for_ready "$(name_prefix)-east-0" - - sleep 10 - - # Vault Unseal - local pods=($(kubectl get pods --selector='app.kubernetes.io/name=vault' -o json | jq -r '.items[].metadata.name')) - for pod in "${pods[@]}" - do - if [[ ${pod?} != "$(name_prefix)-east-0" ]] - then - kubectl exec -ti ${pod} -- vault operator raft join http://$(name_prefix)-east-0.$(name_prefix)-east-internal:8200 - kubectl exec -ti ${pod} -- vault operator unseal ${primary_token} - wait_for_ready "${pod}" - fi - done - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "false" ] - - local init_status=$(kubectl exec "$(name_prefix)-east-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "true" ] - - kubectl exec "$(name_prefix)-east-0" -- vault login ${primary_root} - - local raft_status=$(kubectl exec "$(name_prefix)-east-0" -- vault operator raft list-peers -format=json | - jq -r '.data.config.servers | length') - [ "${raft_status}" == "3" ] - - kubectl exec -ti $(name_prefix)-east-0 -- vault write -f sys/replication/performance/primary/enable primary_cluster_addr=https://$(name_prefix)-east-active:8201 - - local secondary=$(kubectl exec -ti "$(name_prefix)-east-0" -- vault write sys/replication/performance/primary/secondary-token id=secondary -format=json) - [ "${secondary}" != "" ] - - local secondary_replica_token=$(echo ${secondary} | jq -r '.wrap_info.token') - [ "${secondary_replica_token}" != "" ] - - # Install vault-west - helm install "$(name_prefix)-west" \ - --set='injector.enabled=false' \ - --set='server.image.repository=hashicorp/vault-enterprise' \ - --set='server.image.tag=1.4.0_ent' \ - --set='server.ha.enabled=true' \ - --set='server.ha.raft.enabled=true' . 
- wait_for_running "$(name_prefix)-west-0" - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "true" ] - - local init_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "false" ] - - # Vault Init - local init=$(kubectl exec -ti "$(name_prefix)-west-0" -- \ - vault operator init -format=json -n 1 -t 1) - - local secondary_token=$(echo ${init} | jq -r '.unseal_keys_b64[0]') - [ "${secondary_token}" != "" ] - - local secondary_root=$(echo ${init} | jq -r '.root_token') - [ "${secondary_root}" != "" ] - - kubectl exec -ti "$(name_prefix)-west-0" -- vault operator unseal ${secondary_token} - wait_for_ready "$(name_prefix)-west-0" - - sleep 10 - - # Vault Unseal - local pods=($(kubectl get pods --selector='app.kubernetes.io/instance=vault-west' -o json | jq -r '.items[].metadata.name')) - for pod in "${pods[@]}" - do - if [[ ${pod?} != "$(name_prefix)-west-0" ]] - then - kubectl exec -ti ${pod} -- vault operator raft join http://$(name_prefix)-west-0.$(name_prefix)-west-internal:8200 - kubectl exec -ti ${pod} -- vault operator unseal ${secondary_token} - wait_for_ready "${pod}" - fi - done - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "false" ] - - local init_status=$(kubectl exec "$(name_prefix)-west-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "true" ] - - kubectl exec "$(name_prefix)-west-0" -- vault login ${secondary_root} - - local raft_status=$(kubectl exec "$(name_prefix)-west-0" -- vault operator raft list-peers -format=json | - jq -r '.data.config.servers | length') - [ "${raft_status}" == "3" ] - - kubectl exec -ti "$(name_prefix)-west-0" -- vault write sys/replication/performance/secondary/enable token=${secondary_replica_token} - - sleep 10 - - local pods=($(kubectl get pods --selector='app.kubernetes.io/instance=vault-west' -o json | jq -r '.items[].metadata.name')) - for pod in "${pods[@]}" - do - if [[ ${pod?} != "$(name_prefix)-west-0" ]] - then - kubectl exec -ti ${pod} -- vault operator unseal ${primary_token} - wait_for_ready "${pod}" - fi - done -} - -setup() { - kubectl delete namespace acceptance --ignore-not-found=true - kubectl create namespace acceptance - kubectl config set-context --current --namespace=acceptance -} - -#cleanup -teardown() { - if [[ ${CLEANUP:-true} == "true" ]] - then - helm delete vault-east - helm delete vault-west - kubectl delete --all pvc - kubectl delete namespace acceptance --ignore-not-found=true - fi -} diff --git a/charts/vkpr/vault-helm/test/acceptance/server-ha-raft.bats b/charts/vkpr/vault-helm/test/acceptance/server-ha-raft.bats deleted file mode 100644 index b6f1f255..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/server-ha-raft.bats +++ /dev/null @@ -1,119 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ha-raft: testing deployment" { - cd `chart_dir` - - helm install "$(name_prefix)" \ - --set='server.ha.enabled=true' \ - --set='server.ha.raft.enabled=true' . 
- wait_for_running $(name_prefix)-0 - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "true" ] - - local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "false" ] - - # Replicas - local replicas=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.replicas') - [ "${replicas}" == "3" ] - - # Volume Mounts - local volumeCount=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.containers[0].volumeMounts | length') - [ "${volumeCount}" == "2" ] - - # Volumes - local volumeCount=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.volumes | length') - [ "${volumeCount}" == "1" ] - - local volume=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.volumes[0].configMap.name') - [ "${volume}" == "$(name_prefix)-config" ] - - # Service - local service=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.clusterIP') - [ "${service}" != "None" ] - - local service=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.type') - [ "${service}" == "ClusterIP" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports | length') - [ "${ports}" == "2" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports[0].port') - [ "${ports}" == "8200" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports[1].port') - [ "${ports}" == "8201" ] - - # Vault Init - local init=$(kubectl exec -ti "$(name_prefix)-0" -- \ - vault operator init -format=json -n 1 -t 1) - - local token=$(echo ${init} | jq -r '.unseal_keys_b64[0]') - [ "${token}" != "" ] - - local root=$(echo ${init} | jq -r '.root_token') - [ "${root}" != "" ] - - kubectl exec -ti vault-0 -- vault operator unseal ${token} - wait_for_ready "$(name_prefix)-0" - - sleep 5 - - # Vault Unseal - local pods=($(kubectl get pods --selector='app.kubernetes.io/name=vault' -o json | jq -r '.items[].metadata.name')) - for pod in "${pods[@]}" - do - if [[ ${pod?} != "$(name_prefix)-0" ]] - then - kubectl exec -ti ${pod} -- vault operator raft join http://$(name_prefix)-0.$(name_prefix)-internal:8200 - kubectl exec -ti ${pod} -- vault operator unseal ${token} - wait_for_ready "${pod}" - fi - done - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "false" ] - - local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "true" ] - - kubectl exec "$(name_prefix)-0" -- vault login ${root} - - local raft_status=$(kubectl exec "$(name_prefix)-0" -- vault operator raft list-peers -format=json | - jq -r '.data.config.servers | length') - [ "${raft_status}" == "3" ] -} - -setup() { - kubectl delete namespace acceptance --ignore-not-found=true - kubectl create namespace acceptance - kubectl config set-context --current --namespace=acceptance -} - -#cleanup -teardown() { - if [[ ${CLEANUP:-true} == "true" ]] - then - helm delete vault - kubectl delete --all pvc - kubectl delete namespace acceptance --ignore-not-found=true - fi -} diff --git a/charts/vkpr/vault-helm/test/acceptance/server-ha.bats b/charts/vkpr/vault-helm/test/acceptance/server-ha.bats 
deleted file mode 100644 index 4cb4a754..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/server-ha.bats +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ha: testing deployment" { - cd `chart_dir` - - helm install "$(name_prefix)" \ - --set='server.ha.enabled=true' . - wait_for_running $(name_prefix)-0 - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "true" ] - - local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "false" ] - - # Replicas - local replicas=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.replicas') - [ "${replicas}" == "3" ] - - # Volume Mounts - local volumeCount=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.containers[0].volumeMounts | length') - [ "${volumeCount}" == "1" ] - - # Volumes - local volumeCount=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.volumes | length') - [ "${volumeCount}" == "1" ] - - local volume=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.volumes[0].configMap.name') - [ "${volume}" == "$(name_prefix)-config" ] - - # Service - local service=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.clusterIP') - [ "${service}" != "None" ] - - local service=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.type') - [ "${service}" == "ClusterIP" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports | length') - [ "${ports}" == "2" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports[0].port') - [ "${ports}" == "8200" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports[1].port') - [ "${ports}" == "8201" ] - - # Vault Init - local token=$(kubectl exec -ti "$(name_prefix)-0" -- \ - vault operator init -format=json -n 1 -t 1 | \ - jq -r '.unseal_keys_b64[0]') - [ "${token}" != "" ] - - # Vault Unseal - local pods=($(kubectl get pods --selector='app.kubernetes.io/name=vault' -o json | jq -r '.items[].metadata.name')) - for pod in "${pods[@]}" - do - kubectl exec -ti ${pod} -- vault operator unseal ${token} - done - - wait_for_ready "$(name_prefix)-0" - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "false" ] - - local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "true" ] -} - -# setup a consul env -setup() { - kubectl delete namespace acceptance --ignore-not-found=true - kubectl create namespace acceptance - kubectl config set-context --current --namespace=acceptance - - helm install consul \ - https://github.com/hashicorp/consul-helm/archive/v0.16.2.tar.gz \ - --set 'ui.enabled=false' \ - - wait_for_running_consul -} - -#cleanup -teardown() { - if [[ ${CLEANUP:-true} == "true" ]] - then - helm delete vault - helm delete consul - kubectl delete --all pvc - kubectl delete namespace acceptance --ignore-not-found=true - fi -} diff --git a/charts/vkpr/vault-helm/test/acceptance/server-test/annotations-overrides.yaml b/charts/vkpr/vault-helm/test/acceptance/server-test/annotations-overrides.yaml deleted file mode 100644 index 
459576a9..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/server-test/annotations-overrides.yaml +++ /dev/null @@ -1,9 +0,0 @@ -server: - annotations: | - environment: production - milk: oat - myName: "{{ .Release.Name }}" - service: - annotations: - active: sometimes - pickMe: please diff --git a/charts/vkpr/vault-helm/test/acceptance/server.bats b/charts/vkpr/vault-helm/test/acceptance/server.bats deleted file mode 100644 index ce7843fb..00000000 --- a/charts/vkpr/vault-helm/test/acceptance/server.bats +++ /dev/null @@ -1,116 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/standalone: testing deployment" { - cd `chart_dir` - - kubectl delete namespace acceptance --ignore-not-found=true - kubectl create namespace acceptance - kubectl config set-context --current --namespace=acceptance - - helm install "$(name_prefix)" . - wait_for_running $(name_prefix)-0 - - # Sealed, not initialized - local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "true" ] - - local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "false" ] - - # Replicas - local replicas=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.replicas') - [ "${replicas}" == "1" ] - - # Affinity - local affinity=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.affinity') - [ "${affinity}" != "null" ] - - # Volume Mounts - local volumeCount=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.containers[0].volumeMounts | length') - [ "${volumeCount}" == "2" ] - - local mountName=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.containers[0].volumeMounts[0].name') - [ "${mountName}" == "data" ] - - local mountPath=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.containers[0].volumeMounts[0].mountPath') - [ "${mountPath}" == "/vault/data" ] - - # Volumes - local volumeCount=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.volumes | length') - [ "${volumeCount}" == "1" ] - - local volume=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.volumes[0].configMap.name') - [ "${volume}" == "$(name_prefix)-config" ] - - # Security Context - local fsGroup=$(kubectl get statefulset "$(name_prefix)" --output json | - jq -r '.spec.template.spec.securityContext.fsGroup') - [ "${fsGroup}" == "1000" ] - - # Service - local service=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.clusterIP') - [ "${service}" != "None" ] - - local service=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.type') - [ "${service}" == "ClusterIP" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports | length') - [ "${ports}" == "2" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports[0].port') - [ "${ports}" == "8200" ] - - local ports=$(kubectl get service "$(name_prefix)" --output json | - jq -r '.spec.ports[1].port') - [ "${ports}" == "8201" ] - - # Vault Init - local token=$(kubectl exec -ti "$(name_prefix)-0" -- \ - vault operator init -format=json -n 1 -t 1 | \ - jq -r '.unseal_keys_b64[0]') - [ "${token}" != "" ] - - # Vault Unseal - local pods=($(kubectl get pods --selector='app.kubernetes.io/name=vault' -o json | jq -r 
'.items[].metadata.name')) - for pod in "${pods[@]}" - do - kubectl exec -ti ${pod} -- vault operator unseal ${token} - done - - wait_for_ready "$(name_prefix)-0" - - # Unsealed, initialized - local sealed_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.sealed' ) - [ "${sealed_status}" == "false" ] - - local init_status=$(kubectl exec "$(name_prefix)-0" -- vault status -format=json | - jq -r '.initialized') - [ "${init_status}" == "true" ] -} - -# Clean up -teardown() { - if [[ ${CLEANUP:-true} == "true" ]] - then - echo "helm/pvc teardown" - helm delete vault - kubectl delete --all pvc - kubectl delete namespace acceptance --ignore-not-found=true - fi -} diff --git a/charts/vkpr/vault-helm/test/docker/Test.dockerfile b/charts/vkpr/vault-helm/test/docker/Test.dockerfile deleted file mode 100644 index 003a06fa..00000000 --- a/charts/vkpr/vault-helm/test/docker/Test.dockerfile +++ /dev/null @@ -1,45 +0,0 @@ -# This Dockerfile installs all the dependencies necessary to run the unit and -# acceptance tests. This image also contains gcloud so you can run tests -# against a GKE cluster easily. -# -# This image has no automatic entrypoint. It is expected that you'll run -# a script to configure kubectl, potentially install Helm, and run the tests -# manually. This image only has the dependencies pre-installed. - -FROM alpine:latest -WORKDIR /root - -ENV BATS_VERSION "1.1.0" - -# base packages -RUN apk update && apk add --no-cache --virtual .build-deps \ - ca-certificates \ - curl \ - tar \ - bash \ - openssl \ - python \ - py-pip \ - git \ - jq - -# yq -RUN pip install yq - -# gcloud -RUN curl -OL https://dl.google.com/dl/cloudsdk/channels/rapid/install_google_cloud_sdk.bash && \ - bash install_google_cloud_sdk.bash --disable-prompts --install-dir='/root/' && \ - ln -s /root/google-cloud-sdk/bin/gcloud /usr/local/bin/gcloud - -# kubectl -RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \ - chmod +x ./kubectl && \ - mv ./kubectl /usr/local/bin/kubectl - -# helm -RUN curl https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3 | bash - -# bats -RUN curl -sSL https://github.com/bats-core/bats-core/archive/v${BATS_VERSION}.tar.gz -o /tmp/bats.tgz \ - && tar -zxf /tmp/bats.tgz -C /tmp \ - && /bin/bash /tmp/bats-core-$BATS_VERSION/install.sh /usr/local diff --git a/charts/vkpr/vault-helm/test/terraform/.gitignore b/charts/vkpr/vault-helm/test/terraform/.gitignore deleted file mode 100644 index d6800621..00000000 --- a/charts/vkpr/vault-helm/test/terraform/.gitignore +++ /dev/null @@ -1 +0,0 @@ -vault-helm-dev-creds.json diff --git a/charts/vkpr/vault-helm/test/terraform/main.tf b/charts/vkpr/vault-helm/test/terraform/main.tf deleted file mode 100644 index e3fc2efc..00000000 --- a/charts/vkpr/vault-helm/test/terraform/main.tf +++ /dev/null @@ -1,89 +0,0 @@ -provider "google" { - project = "${var.project}" - region = "us-central1" - - credentials = "${file("vault-helm-dev-creds.json")}" -} - -resource "random_id" "suffix" { - byte_length = 4 -} - -data "google_container_engine_versions" "main" { - location = "${var.zone}" - version_prefix = "1.15." 
-} - -data "google_service_account" "gcpapi" { - account_id = "${var.gcp_service_account}" -} - -resource "google_kms_key_ring" "keyring" { - name = "vault-helm-unseal-kr" - location = "global" -} - -resource "google_kms_crypto_key" "vault-helm-unseal-key" { - name = "vault-helm-unseal-key" - key_ring = "${google_kms_key_ring.keyring.self_link}" - - lifecycle { - prevent_destroy = true - } -} - -resource "google_container_cluster" "cluster" { - name = "vault-helm-dev-${random_id.suffix.dec}" - project = "${var.project}" - enable_legacy_abac = true - initial_node_count = 3 - location = "${var.zone}" - min_master_version = "${data.google_container_engine_versions.main.latest_master_version}" - node_version = "${data.google_container_engine_versions.main.latest_node_version}" - - node_config { - #service account for nodes to use - oauth_scopes = [ - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/compute", - "https://www.googleapis.com/auth/devstorage.read_write", - "https://www.googleapis.com/auth/logging.write", - "https://www.googleapis.com/auth/monitoring", - "https://www.googleapis.com/auth/service.management.readonly", - "https://www.googleapis.com/auth/servicecontrol", - "https://www.googleapis.com/auth/trace.append", - ] - - service_account = "${data.google_service_account.gcpapi.email}" - } -} - -resource "null_resource" "kubectl" { - count = "${var.init_cli ? 1 : 0 }" - - triggers = { - cluster = "${google_container_cluster.cluster.id}" - } - - # On creation, we want to setup the kubectl credentials. The easiest way - # to do this is to shell out to gcloud. - provisioner "local-exec" { - command = "gcloud container clusters get-credentials --zone=${var.zone} ${google_container_cluster.cluster.name}" - } - - # On destroy we want to try to clean up the kubectl credentials. This - # might fail if the credentials are already cleaned up or something so we - # want this to continue on failure. Generally, this works just fine since - # it only operates on local data. - provisioner "local-exec" { - when = "destroy" - on_failure = "continue" - command = "kubectl config get-clusters | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-cluster" - } - - provisioner "local-exec" { - when = "destroy" - on_failure = "continue" - command = "kubectl config get-contexts | grep ${google_container_cluster.cluster.name} | xargs -n1 kubectl config delete-context" - } -} diff --git a/charts/vkpr/vault-helm/test/terraform/outputs.tf b/charts/vkpr/vault-helm/test/terraform/outputs.tf deleted file mode 100644 index 6435d2b7..00000000 --- a/charts/vkpr/vault-helm/test/terraform/outputs.tf +++ /dev/null @@ -1,7 +0,0 @@ -output "cluster_id" { - value = "${google_container_cluster.cluster.id}" -} - -output "cluster_name" { - value = "${google_container_cluster.cluster.name}" -} diff --git a/charts/vkpr/vault-helm/test/terraform/variables.tf b/charts/vkpr/vault-helm/test/terraform/variables.tf deleted file mode 100644 index 971af4e5..00000000 --- a/charts/vkpr/vault-helm/test/terraform/variables.tf +++ /dev/null @@ -1,28 +0,0 @@ -variable "project" { - default = "vault-helm-dev-246514" - - description = < 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/ClusterRole: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/injector-clusterrole.yaml \ - --set 'global.enabled=false' \ - . 
|| echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/injector-clusterrolebinding.bats b/charts/vkpr/vault-helm/test/unit/injector-clusterrolebinding.bats deleted file mode 100755 index 6e217878..00000000 --- a/charts/vkpr/vault-helm/test/unit/injector-clusterrolebinding.bats +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "injector/ClusterRoleBinding: enabled by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-clusterrolebinding.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/ClusterRoleBinding: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/injector-clusterrolebinding.yaml \ - --set 'global.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/injector-deployment.bats b/charts/vkpr/vault-helm/test/unit/injector-deployment.bats deleted file mode 100755 index bd3f63a0..00000000 --- a/charts/vkpr/vault-helm/test/unit/injector-deployment.bats +++ /dev/null @@ -1,449 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "injector/deployment: default injector.enabled" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/deployment: enable with injector.enabled true" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/deployment: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'global.enabled=false' \ - --set 'injector.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "injector/deployment: image defaults to injector.image" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.image.repository=foo' \ - --set 'injector.image.tag=1.2.3' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:1.2.3" ] - - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.image.repository=foo' \ - --set 'injector.image.tag=1.2.3' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:1.2.3" ] -} - -@test "injector/deployment: default imagePullPolicy" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].imagePullPolicy' | tee /dev/stderr) - [ "${actual}" = "IfNotPresent" ] -} - -@test "injector/deployment: default resources" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "injector/deployment: custom resources" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.enabled=true' \ - --set 'injector.resources.requests.memory=256Mi' \ - --set 'injector.resources.requests.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.requests.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.enabled=true' \ - --set 'injector.resources.limits.memory=256Mi' \ - --set 'injector.resources.limits.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.limits.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.enabled=true' \ - --set 'injector.resources.requests.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.requests.cpu' | tee /dev/stderr) - [ "${actual}" = "250m" ] - - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.enabled=true' \ - --set 'injector.resources.limits.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.limits.cpu' | tee /dev/stderr) - [ "${actual}" = "250m" ] -} - -@test "injector/deployment: manual TLS environment vars" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.certs.secretName=foobar' \ - --set 'injector.certs.certName=test.crt' \ - --set 'injector.certs.keyName=test.key' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[5].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_TLS_CERT_FILE" ] - - local actual=$(echo $object | - yq -r '.[5].value' | tee /dev/stderr) - [ "${actual}" = "/etc/webhook/certs/test.crt" ] - - local actual=$(echo $object | - yq -r '.[6].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_TLS_KEY_FILE" ] - - local actual=$(echo $object | - yq -r '.[6].value' | tee /dev/stderr) - [ "${actual}" = "/etc/webhook/certs/test.key" ] -} - -@test "injector/deployment: auto TLS by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts | length' | tee /dev/stderr) - [ "${actual}" = "0" ] - - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[5].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_TLS_AUTO" ] - - local actual=$(echo $object | - yq -r '.[6].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_TLS_AUTO_HOSTS" ] -} - -@test "injector/deployment: with externalVaultAddr" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[2].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_VAULT_ADDR" ] - - local actual=$(echo $object | - yq -r '.[2].value' | tee /dev/stderr) - [ "${actual}" = "http://vault-outside" ] -} - -@test "injector/deployment: without externalVaultAddr" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --release-name not-external-test \ - --namespace default \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[2].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_VAULT_ADDR" ] - - local actual=$(echo $object | - yq -r '.[2].value' | tee /dev/stderr) - [ "${actual}" = "http://not-external-test-vault.default.svc:8200" ] -} - -@test "injector/deployment: default authPath" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[3].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_VAULT_AUTH_PATH" ] - - local actual=$(echo $object | - yq -r '.[3].value' | tee /dev/stderr) - [ "${actual}" = "auth/kubernetes" ] -} - -@test "injector/deployment: custom authPath" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.authPath=auth/k8s' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[3].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_VAULT_AUTH_PATH" ] - - local actual=$(echo $object | - yq -r '.[3].value' | tee /dev/stderr) - [ "${actual}" = "auth/k8s" ] -} - -@test "injector/deployment: default logLevel" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[1].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_LOG_LEVEL" ] - - local actual=$(echo $object | - yq -r '.[1].value' | tee /dev/stderr) - [ "${actual}" = "info" ] -} - -@test "injector/deployment: custom logLevel" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.logLevel=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[1].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_LOG_LEVEL" ] - - local actual=$(echo $object | - yq -r '.[1].value' | tee /dev/stderr) - [ "${actual}" = "foo" ] -} - -@test "injector/deployment: default logFormat" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[7].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_LOG_FORMAT" ] - - local actual=$(echo $object | - yq -r '.[7].value' | tee /dev/stderr) - [ "${actual}" = "standard" ] -} - -@test "injector/deployment: custom logFormat" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.logFormat=json' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[7].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_LOG_FORMAT" ] - - local actual=$(echo $object | - yq -r '.[7].value' | tee /dev/stderr) - [ "${actual}" = "json" ] -} - -@test "injector/deployment: default revoke on shutdown" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[8].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_REVOKE_ON_SHUTDOWN" ] - - local actual=$(echo $object | - yq -r '.[8].value' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "injector/deployment: custom revoke on shutdown" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.revokeOnShutdown=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[8].name' | tee /dev/stderr) - [ "${actual}" = "AGENT_INJECT_REVOKE_ON_SHUTDOWN" ] - - local actual=$(echo $object | - yq -r '.[8].value' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# extraEnvironmentVars - -@test "injector/deployment: set extraEnvironmentVars" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.extraEnvironmentVars.FOO=bar' \ - --set 'injector.extraEnvironmentVars.FOOBAR=foobar' \ - --set 'injector.extraEnvironmentVars.lower\.case=sanitized' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[9].name' | tee /dev/stderr) - [ "${actual}" = "FOO" ] - - local actual=$(echo $object | - yq -r '.[9].value' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(echo $object | - yq -r '.[10].name' | tee /dev/stderr) - [ "${actual}" = "FOOBAR" ] - - local actual=$(echo $object | - yq -r '.[10].value' | tee /dev/stderr) - [ "${actual}" = "foobar" ] - - local actual=$(echo $object | - yq -r '.[11].name' | tee /dev/stderr) - [ "${actual}" = "LOWER_CASE" ] - - local actual=$(echo $object | - yq -r '.[11].value' | tee /dev/stderr) - [ "${actual}" = "sanitized" ] -} - -#-------------------------------------------------------------------- -# affinity - -@test "injector/deployment: affinity not set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq '.spec.template.spec | .affinity? == null' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/deployment: affinity can be set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.affinity=foobar' \ - . | tee /dev/stderr | - yq '.spec.template.spec.affinity == "foobar"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# tolerations - -@test "injector/deployment: tolerations not set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq '.spec.template.spec | .tolerations? 
== null' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/deployment: tolerations can be set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.tolerations=foobar' \ - . | tee /dev/stderr | - yq '.spec.template.spec.tolerations == "foobar"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# nodeSelector - -@test "injector/deployment: nodeSelector is not set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "injector/deployment: nodeSelector can be set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.nodeSelector=testing' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) - [ "${actual}" = "testing" ] -} - -#-------------------------------------------------------------------- -# priorityClassName - -@test "injector/deployment: priorityClassName not set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - . | tee /dev/stderr | - yq '.spec.template.spec | .priorityClassName? == null' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/deployment: priorityClassName can be set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-deployment.yaml \ - --set 'injector.priorityClassName=armaggeddon' \ - . | tee /dev/stderr | - yq '.spec.template.spec | .priorityClassName == "armaggeddon"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/injector-mutating-webhook.bats b/charts/vkpr/vault-helm/test/unit/injector-mutating-webhook.bats deleted file mode 100755 index 2eefcf2a..00000000 --- a/charts/vkpr/vault-helm/test/unit/injector-mutating-webhook.bats +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "injector/MutatingWebhookConfiguration: enabled by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-mutating-webhook.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/MutatingWebhookConfiguration: disable with global.enabled false" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/injector-mutating-webhook.yaml \ - --set 'global.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "injector/MutatingWebhookConfiguration: disable with injector.enabled false" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/injector-mutating-webhook.yaml \ - --set 'injector.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "injector/MutatingWebhookConfiguration: namespace is set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-mutating-webhook.yaml \ - --set 'injector.enabled=true' \ - --namespace foo \ - . 
| tee /dev/stderr | - yq '.webhooks[0].clientConfig.service.namespace' | tee /dev/stderr) - [ "${actual}" = "\"foo\"" ] -} - -@test "injector/MutatingWebhookConfiguration: caBundle is empty" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-mutating-webhook.yaml \ - --set 'injector.enabled=true' \ - --namespace foo \ - . | tee /dev/stderr | - yq '.webhooks[0].clientConfig.caBundle' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "injector/MutatingWebhookConfiguration: namespaceSelector empty by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-mutating-webhook.yaml \ - --set 'injector.enabled=true' \ - --namespace foo \ - . | tee /dev/stderr | - yq '.webhooks[0].namespaceSelector' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "injector/MutatingWebhookConfiguration: can set namespaceSelector" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-mutating-webhook.yaml \ - --set 'injector.enabled=true' \ - --set 'injector.namespaceSelector.matchLabels.injector=true' \ - . | tee /dev/stderr | - yq '.webhooks[0].namespaceSelector.matchLabels.injector' | tee /dev/stderr) - - [ "${actual}" = "true" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/injector-service.bats b/charts/vkpr/vault-helm/test/unit/injector-service.bats deleted file mode 100755 index af8787da..00000000 --- a/charts/vkpr/vault-helm/test/unit/injector-service.bats +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "injector/Service: service enabled by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-service.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/injector-service.yaml \ - --set 'injector.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/Service: disable with global.enabled false" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/injector-service.yaml \ - --set 'global.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/injector-service.yaml \ - --set 'global.enabled=false' \ - --set 'injector.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/injector-serviceaccount.bats b/charts/vkpr/vault-helm/test/unit/injector-serviceaccount.bats deleted file mode 100755 index 1055d906..00000000 --- a/charts/vkpr/vault-helm/test/unit/injector-serviceaccount.bats +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "injector/ServiceAccount: enabled by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/injector-serviceaccount.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "injector/ServiceAccount: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/injector-serviceaccount.yaml \ - --set 'global.enabled=false' \ - . 
|| echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-clusterrolebinding.bats b/charts/vkpr/vault-helm/test/unit/server-clusterrolebinding.bats deleted file mode 100755 index d0d2acf8..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-clusterrolebinding.bats +++ /dev/null @@ -1,72 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ClusterRoleBinding: enabled by default" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-clusterrolebinding.yaml \ - --set 'server.dev.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$( (helm template \ - --show-only templates/server-clusterrolebinding.yaml \ - --set 'server.ha.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$( (helm template \ - --show-only templates/server-clusterrolebinding.yaml \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/ClusterRoleBinding: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-clusterrolebinding.yaml \ - --set 'global.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/ClusterRoleBinding: can disable with server.authDelegator" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-clusterrolebinding.yaml \ - --set 'server.authDelegator.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-clusterrolebinding.yaml \ - --set 'server.authDelegator.enabled=false' \ - --set 'server.ha.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-clusterrolebinding.yaml \ - --set 'server.authDelegator.enabled=false' \ - --set 'server.dev.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/ClusterRoleBinding: disable with injector.externalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-clusterrolebinding.yaml \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-configmap.bats b/charts/vkpr/vault-helm/test/unit/server-configmap.bats deleted file mode 100755 index fe2ac125..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-configmap.bats +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ConfigMap: enabled by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.ha.enabled=true' \ - . 
| tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.raft.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.standalone.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/ConfigMap: raft config disabled by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - grep "raft" | yq 'length > 0' | tee /dev/stderr) - [ "${actual}" != "true" ] -} - -@test "server/ConfigMap: raft config can be enabled" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.raft.enabled=true' \ - . | tee /dev/stderr | - grep "raft" | yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - - -@test "server/ConfigMap: disabled by server.dev.enabled true" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.dev.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/ConfigMap: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'global.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/ConfigMap: standalone extraConfig is set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.standalone.config="{\"hello\": \"world\"}"' \ - . | tee /dev/stderr | - yq '.data["extraconfig-from-values.hcl"] | match("world") | length' | tee /dev/stderr) - [ ! -z "${actual}" ] - - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.standalone.config="{\"foo\": \"bar\"}"' \ - . | tee /dev/stderr | - yq '.data["extraconfig-from-values.hcl"] | match("bar") | length' | tee /dev/stderr) - [ ! -z "${actual}" ] -} - -@test "server/ConfigMap: ha extraConfig is set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.config="{\"hello\": \"world\"}"' \ - . | tee /dev/stderr | - yq '.data["extraconfig-from-values.hcl"] | match("world") | length' | tee /dev/stderr) - [ ! -z "${actual}" ] - - local actual=$(helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.config="{\"foo\": \"bar\"}"' \ - . | tee /dev/stderr | - yq '.data["extraconfig-from-values.hcl"] | match("bar") | length' | tee /dev/stderr) - [ ! -z "${actual}" ] -} - -@test "server/ConfigMap: disabled by injector.externalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-config-configmap.yaml \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . 
|| echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-dev-statefulset.bats b/charts/vkpr/vault-helm/test/unit/server-dev-statefulset.bats deleted file mode 100755 index 3b38eab4..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-dev-statefulset.bats +++ /dev/null @@ -1,403 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/dev-StatefulSet: enable with server.dev.enabled true" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/dev-StatefulSet: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.enabled=false' \ - --set 'server.dev.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/dev-StatefulSet: disable with injector.externalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - --set 'server.dev.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/dev-StatefulSet: image defaults to server.image.repository:tag" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.image.repository=foo' \ - --set 'server.image.tag=1.2.3' \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:1.2.3" ] -} - -@test "server/ha-StatefulSet: image tag defaults to latest" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.image.repository=foo' \ - --set 'server.image.tag=' \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:latest" ] -} - -#-------------------------------------------------------------------- -# replicas - -@test "server/dev-StatefulSet: default replicas" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.replicas' | tee /dev/stderr) - [ "${actual}" = "1" ] -} - -@test "server/dev-StatefulSet: cant set replicas" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.dev.replicas=100' \ - . | tee /dev/stderr | - yq -r '.spec.replicas' | tee /dev/stderr) - [ "${actual}" = "1" ] -} - -#-------------------------------------------------------------------- -# updateStrategy - -@test "server/dev-StatefulSet: updateStrategy" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.spec.updateStrategy.type' | tee /dev/stderr) - [ "${actual}" = "OnDelete" ] -} - -#-------------------------------------------------------------------- -# resources - -@test "server/dev-StatefulSet: default resources" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/dev-StatefulSet: custom resources" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.resources.requests.memory=256Mi' \ - --set 'server.resources.requests.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.requests.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.resources.limits.memory=256Mi' \ - --set 'server.resources.limits.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.limits.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.resources.requests.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.requests.cpu' | tee /dev/stderr) - [ "${actual}" = "250m" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.resources.limits.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.limits.cpu' | tee /dev/stderr) - [ "${actual}" = "250m" ] -} - -#-------------------------------------------------------------------- -# extraVolumes - -@test "server/dev-StatefulSet: adds extra volume" { - cd `chart_dir` - - # Test that it defines it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.configMap.name' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(echo $object | - yq -r '.configMap.secretName' | tee /dev/stderr) - [ "${actual}" = "null" ] - - # Test that it mounts it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/vault/userconfig/foo" ] -} - -@test "server/dev-StatefulSet: adds extra secret volume" { - cd `chart_dir` - - # Test that it defines it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.extraVolumes[0].type=secret' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.secret.name' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(echo $object | - yq -r '.secret.secretName' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - # Test that it mounts it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/vault/userconfig/foo" ] -} - -@test "server/dev-StatefulSet: no storageClass on claim by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates[0].spec.storageClassName' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -#-------------------------------------------------------------------- -# extraEnvironmentVars - -@test "server/dev-StatefulSet: set extraEnvironmentVars" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.extraEnvironmentVars.FOO=bar' \ - --set 'server.extraEnvironmentVars.FOOBAR=foobar' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[11].name' | tee /dev/stderr) - [ "${actual}" = "FOO" ] - - local actual=$(echo $object | - yq -r '.[11].value' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(echo $object | - yq -r '.[12].name' | tee /dev/stderr) - [ "${actual}" = "FOOBAR" ] - - local actual=$(echo $object | - yq -r '.[12].value' | tee /dev/stderr) - [ "${actual}" = "foobar" ] -} - -#-------------------------------------------------------------------- -# extraSecretEnvironmentVars - -@test "server/dev-StatefulSet: set extraSecretEnvironmentVars" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraSecretEnvironmentVars[0].envName=ENV_FOO_0' \ - --set 'server.extraSecretEnvironmentVars[0].secretName=secret_name_0' \ - --set 'server.extraSecretEnvironmentVars[0].secretKey=secret_key_0' \ - --set 'server.extraSecretEnvironmentVars[1].envName=ENV_FOO_1' \ - --set 'server.extraSecretEnvironmentVars[1].secretName=secret_name_1' \ - --set 'server.extraSecretEnvironmentVars[1].secretKey=secret_key_1' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[10].name' | tee /dev/stderr) - [ "${actual}" = "ENV_FOO_0" ] - local actual=$(echo $object | - yq -r '.[10].valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "secret_name_0" ] - local actual=$(echo $object | - yq -r '.[10].valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "secret_key_0" ] - - local actual=$(echo $object | - yq -r '.[11].name' | tee /dev/stderr) - [ "${actual}" = "ENV_FOO_1" ] - local actual=$(echo $object | - yq -r '.[11].valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "secret_name_1" ] - local actual=$(echo $object | - yq -r '.[11].valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "secret_key_1" ] -} - -#-------------------------------------------------------------------- -# storage class - -@test "server/dev-StatefulSet: can't set storageClass" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.dataStorage.enabled=true' \ - --set 'server.dataStorage.storageClass=foo' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.auditStorage.storageClass=foo' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.auditStorage.storageClass=foo' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.auditStorage.storageClass=foo' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -#-------------------------------------------------------------------- -# Security Contexts -@test "server/dev-StatefulSet: uid default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsUser' | tee /dev/stderr) - [ "${actual}" = "100" ] -} - -@test "server/dev-StatefulSet: uid configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.uid=2000' \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsUser' | tee /dev/stderr) - [ "${actual}" = "2000" ] -} - -@test "server/dev-StatefulSet: gid default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsGroup' | tee /dev/stderr) - [ "${actual}" = "1000" ] -} - -@test "server/dev-StatefulSet: gid configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.gid=2000' \ - --set 'server.dev.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsGroup' | tee /dev/stderr) - [ "${actual}" = "2000" ] -} - -@test "server/dev-StatefulSet: fsgroup default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.fsGroup' | tee /dev/stderr) - [ "${actual}" = "1000" ] -} - -@test "server/dev-StatefulSet: fsgroup configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.gid=2000' \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.fsGroup' | tee /dev/stderr) - [ "${actual}" = "2000" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-ha-active-service.bats b/charts/vkpr/vault-helm/test/unit/server-ha-active-service.bats deleted file mode 100644 index 4e6ad1a9..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-ha-active-service.bats +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ha-active-Service: generic annotations" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-ha-active-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.service.annotations=vaultIsAwesome: true' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["vaultIsAwesome"]' | tee /dev/stderr) - [ "${actual}" = "true" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-ha-disruptionbudget.bats b/charts/vkpr/vault-helm/test/unit/server-ha-disruptionbudget.bats deleted file mode 100755 index f3c329ec..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-ha-disruptionbudget.bats +++ /dev/null @@ -1,87 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/DisruptionBudget: enabled by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-disruptionbudget.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/DisruptionBudget: disable with server.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-disruptionbudget.yaml \ - --set 'globa.enabled=false' \ - --set 'server.ha.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/DisruptionBudget: disable with server.disruptionBudget.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-disruptionbudget.yaml \ - --set 'server.ha.disruptionBudget.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/DisruptionBudget: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-disruptionbudget.yaml \ - --set 'global.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/DisruptionBudget: disable with injector.exernalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-disruptionbudget.yaml \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . 
|| echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/DisruptionBudget: correct maxUnavailable with n=1" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-disruptionbudget.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.replicas=1' \ - . | tee /dev/stderr | - yq '.spec.maxUnavailable' | tee /dev/stderr) - [ "${actual}" = "0" ] -} - -@test "server/DisruptionBudget: correct maxUnavailable with n=3" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-disruptionbudget.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.replicas=3' \ - . | tee /dev/stderr | - yq '.spec.maxUnavailable' | tee /dev/stderr) - [ "${actual}" = "1" ] -} - -@test "server/DisruptionBudget: correct maxUnavailable with n=5" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-disruptionbudget.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.replicas=5' \ - . | tee /dev/stderr | - yq '.spec.maxUnavailable' | tee /dev/stderr) - [ "${actual}" = "2" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-ha-standby-service.bats b/charts/vkpr/vault-helm/test/unit/server-ha-standby-service.bats deleted file mode 100644 index f2f0043b..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-ha-standby-service.bats +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ha-standby-Service: generic annotations string" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-ha-standby-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.service.annotations=vaultIsAwesome: true' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["vaultIsAwesome"]' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/ha-standby-Service: generic annotations yaml" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-ha-standby-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.service.annotations.vaultIsAwesome=true' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["vaultIsAwesome"]' | tee /dev/stderr) - [ "${actual}" = "true" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-ha-statefulset.bats b/charts/vkpr/vault-helm/test/unit/server-ha-statefulset.bats deleted file mode 100755 index e6d0d588..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-ha-statefulset.bats +++ /dev/null @@ -1,645 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ha-StatefulSet: enable with server.ha.enabled true" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/ha-StatefulSet: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.enabled=false' \ - --set 'server.ha.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/ha-StatefulSet: disable with injector.externalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - --set 'server.ha.enabled=true' \ - . 
|| echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/ha-StatefulSet: image defaults to server.image.repository:tag" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.image.repository=foo' \ - --set 'server.image.tag=1.2.3' \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:1.2.3" ] -} - -@test "server/ha-StatefulSet: image tag defaults to latest" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.image.repository=foo' \ - --set 'server.image.tag=' \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:latest" ] -} - -#-------------------------------------------------------------------- -# TLS - -@test "server/ha-StatefulSet: tls disabled" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.tlsDisable=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[4].name' | tee /dev/stderr) - [ "${actual}" = "VAULT_ADDR" ] - - local actual=$(echo $object | - yq -r '.[4].value' | tee /dev/stderr) - [ "${actual}" = "http://127.0.0.1:8200" ] -} -@test "server/ha-StatefulSet: tls enabled" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.tlsDisable=false' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[4].name' | tee /dev/stderr) - [ "${actual}" = "VAULT_ADDR" ] - - local actual=$(echo $object | - yq -r '.[4].value' | tee /dev/stderr) - [ "${actual}" = "https://127.0.0.1:8200" ] -} - -#-------------------------------------------------------------------- -# updateStrategy - -@test "server/ha-StatefulSet: OnDelete updateStrategy" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.updateStrategy.type' | tee /dev/stderr) - [ "${actual}" = "OnDelete" ] -} - -@test "server/ha-StatefulSet: RollingUpdate updateStrategy" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.updateStrategyType="RollingUpdate"' \ - . | tee /dev/stderr | - yq -r '.spec.updateStrategy.type' | tee /dev/stderr) - [ "${actual}" = "RollingUpdate" ] -} - -#-------------------------------------------------------------------- -# affinity - -@test "server/ha-StatefulSet: default affinity" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.affinity' | tee /dev/stderr) - [ "${actual}" != "null" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.affinity=' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.affinity' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -#-------------------------------------------------------------------- -# replicas - -@test "server/ha-StatefulSet: default replicas" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.replicas' | tee /dev/stderr) - [ "${actual}" = "3" ] -} - -@test "server/ha-StatefulSet: custom replicas" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.replicas=10' \ - . | tee /dev/stderr | - yq -r '.spec.replicas' | tee /dev/stderr) - [ "${actual}" = "10" ] -} - -#-------------------------------------------------------------------- -# resources - -@test "server/ha-StatefulSet: default resources" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/ha-StatefulSet: custom resources" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.resources.requests.memory=256Mi' \ - --set 'server.resources.requests.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.requests.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.resources.limits.memory=256Mi' \ - --set 'server.resources.limits.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.limits.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.resources.requests.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.requests.cpu' | tee /dev/stderr) - [ "${actual}" = "250m" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.resources.limits.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.limits.cpu' | tee /dev/stderr) - [ "${actual}" = "250m" ] -} - -#-------------------------------------------------------------------- -# extraVolumes - -@test "server/ha-StatefulSet: adds extra volume" { - cd `chart_dir` - # Test that it defines it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.configMap.name' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(echo $object | - yq -r '.configMap.secretName' | tee /dev/stderr) - [ "${actual}" = "null" ] - - # Test that it mounts it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/vault/userconfig/foo" ] -} - -@test "server/ha-StatefulSet: adds extra volume custom mount path" { - cd `chart_dir` - # Test that it mounts it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - --set 'server.extraVolumes[0].path=/custom/path' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/custom/path/foo" ] -} - -@test "server/ha-StatefulSet: adds extra secret volume custom mount path" { - cd `chart_dir` - - # Test that it mounts it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - --set 'server.extraVolumes[0].path=/custom/path' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/custom/path/foo" ] -} - -@test "server/ha-StatefulSet: adds extra secret volume" { - cd `chart_dir` - - # Test that it defines it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.extraVolumes[0].type=secret' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.secret.name' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(echo $object | - yq -r '.secret.secretName' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - # Test that it mounts it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/vault/userconfig/foo" ] -} - -#-------------------------------------------------------------------- -# extraEnvironmentVars - -@test "server/ha-StatefulSet: set extraEnvironmentVars" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.extraEnvironmentVars.FOO=bar' \ - --set 'server.extraEnvironmentVars.FOOBAR=foobar' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[10].name' | tee /dev/stderr) - [ "${actual}" = "FOO" ] - - local actual=$(echo $object | - yq -r '.[10].value' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(echo $object | - yq -r '.[11].name' | tee /dev/stderr) - [ "${actual}" = "FOOBAR" ] - - local actual=$(echo $object | - yq -r '.[11].value' | tee /dev/stderr) - [ "${actual}" = "foobar" ] -} - -#-------------------------------------------------------------------- -# extraSecretEnvironmentVars - -@test "server/ha-StatefulSet: set extraSecretEnvironmentVars" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.extraSecretEnvironmentVars[0].envName=ENV_FOO_0' \ - --set 'server.extraSecretEnvironmentVars[0].secretName=secret_name_0' \ - --set 'server.extraSecretEnvironmentVars[0].secretKey=secret_key_0' \ - --set 'server.extraSecretEnvironmentVars[1].envName=ENV_FOO_1' \ - --set 'server.extraSecretEnvironmentVars[1].secretName=secret_name_1' \ - --set 'server.extraSecretEnvironmentVars[1].secretKey=secret_key_1' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[10].name' | tee /dev/stderr) - [ "${actual}" = "ENV_FOO_0" ] - local actual=$(echo $object | - yq -r '.[10].valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "secret_name_0" ] - local actual=$(echo $object | - yq -r '.[10].valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "secret_key_0" ] - - local actual=$(echo $object | - yq -r '.[11].name' | tee /dev/stderr) - [ "${actual}" = "ENV_FOO_1" ] - local actual=$(echo $object | - yq -r '.[11].valueFrom.secretKeyRef.name' | tee /dev/stderr) - [ "${actual}" = "secret_name_1" ] - local actual=$(echo $object | - yq -r '.[11].valueFrom.secretKeyRef.key' | tee /dev/stderr) - [ "${actual}" = "secret_key_1" ] -} - -#-------------------------------------------------------------------- -# VAULT_CLUSTER_ADDR renders - -@test "server/ha-StatefulSet: cluster addr renders" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.raft.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[9].name' | tee /dev/stderr) - [ "${actual}" = "VAULT_CLUSTER_ADDR" ] - - local actual=$(echo $object | - yq -r '.[9].value' | tee /dev/stderr) - [ "${actual}" = 'https://$(HOSTNAME).RELEASE-NAME-vault-internal:8201' ] -} - -#-------------------------------------------------------------------- -# VAULT_RAFT_NODE_ID renders - -@test "server/ha-StatefulSet: raft node ID renders" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.ha.raft.enabled=true' \ - --set 'server.ha.raft.setNodeId=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[10].name' | tee /dev/stderr) - [ "${actual}" = "VAULT_RAFT_NODE_ID" ] - - local actual=$(echo $object | - yq -r '.[10].valueFrom.fieldRef.fieldPath' | tee /dev/stderr) - [ "${actual}" = 'metadata.name' ] -} - -#-------------------------------------------------------------------- -# storage class - -@test "server/ha-StatefulSet: no storage by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "0" ] -} - - -@test "server/ha-StatefulSet: cant set data storage" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.dataStorage.enabled=true' \ - --set 'server.dataStorage.storageClass=foo' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/ha-StatefulSet: can set storageClass" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.dataStorage.enabled=false' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.auditStorage.storageClass=foo' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates[0].spec.storageClassName' | tee /dev/stderr) - [ "${actual}" = "foo" ] -} - -@test "server/ha-StatefulSet: can disable storage" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.auditStorage.enabled=false' \ - --set 'server.dataStorage.enabled=false' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "0" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.dataStorage.enabled=false' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "1" ] -} - -@test "server/ha-StatefulSet: can mount audit" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.auditStorage.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "audit")' | tee /dev/stderr) -} - -@test "server/ha-StatefulSet: no data storage" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.auditStorage.enabled=false' \ - --set 'server.dataStorage.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "0" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.dataStorage.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "1" ] -} - -@test "server/ha-StatefulSet: tolerations not set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec | .tolerations? == null' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/ha-StatefulSet: tolerations can be set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.tolerations=foobar' \ - . | tee /dev/stderr | - yq '.spec.template.spec.tolerations == "foobar"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/ha-StatefulSet: nodeSelector is not set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/ha-StatefulSet: specified nodeSelector" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.nodeSelector=testing' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) - [ "${actual}" = "testing" ] -} - -#-------------------------------------------------------------------- -# Security Contexts -@test "server/ha-StatefulSet: uid default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsUser' | tee /dev/stderr) - [ "${actual}" = "100" ] -} - -@test "server/ha-StatefulSet: uid configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.uid=2000' \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsUser' | tee /dev/stderr) - [ "${actual}" = "2000" ] -} - -@test "server/ha-StatefulSet: gid default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsGroup' | tee /dev/stderr) - [ "${actual}" = "1000" ] -} - -@test "server/ha-StatefulSet: gid configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.gid=2000' \ - --set 'server.ha.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsGroup' | tee /dev/stderr) - [ "${actual}" = "2000" ] -} - -@test "server/ha-StatefulSet: fsgroup default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.fsGroup' | tee /dev/stderr) - [ "${actual}" = "1000" ] -} - -@test "server/ha-StatefulSet: fsgroup configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.gid=2000' \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.fsGroup' | tee /dev/stderr) - [ "${actual}" = "2000" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-ingress.bats b/charts/vkpr/vault-helm/test/unit/server-ingress.bats deleted file mode 100755 index 8660920d..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-ingress.bats +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ingress: disabled by default" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-ingress.yaml \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/ingress: disable by injector.externalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-ingress.yaml \ - --set 'server.ingress.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/ingress: checking host entry gets added and path is /" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-ingress.yaml \ - --set 'server.ingress.enabled=true' \ - --set 'server.ingress.hosts[0].host=test.com' \ - --set 'server.ingress.hosts[0].paths[0]=/' \ - . | tee /dev/stderr | - yq -r '.spec.rules[0].host' | tee /dev/stderr) - [ "${actual}" = 'test.com' ] - - local actual=$(helm template \ - --show-only templates/server-ingress.yaml \ - --set 'server.ingress.enabled=true' \ - --set 'server.ingress.hosts[0].host=test.com' \ - --set 'server.ingress.hosts[0].paths[0]=/' \ - . | tee /dev/stderr | - yq -r '.spec.rules[0].http.paths[0].path' | tee /dev/stderr) - [ "${actual}" = '/' ] -} - -@test "server/ingress: vault backend should be added when I specify a path" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-ingress.yaml \ - --set 'server.ingress.enabled=true' \ - --set 'server.ingress.hosts[0].host=test.com' \ - --set 'server.ingress.hosts[0].paths[0]=/' \ - . | tee /dev/stderr | - yq -r '.spec.rules[0].http.paths[0].backend.serviceName | length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - -} - -@test "server/ingress: labels gets added to object" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-ingress.yaml \ - --set 'server.ingress.enabled=true' \ - --set 'server.ingress.labels.traffic=external' \ - --set 'server.ingress.labels.team=dev' \ - . 
| tee /dev/stderr | - yq -r '.metadata.labels.traffic' | tee /dev/stderr) - [ "${actual}" = "external" ] -} - -@test "server/ingress: annotations added to object - string" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-ingress.yaml \ - --set 'server.ingress.enabled=true' \ - --set 'server.ingress.annotations=kubernetes.io/ingress.class: nginx' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["kubernetes.io/ingress.class"]' | tee /dev/stderr) - [ "${actual}" = "nginx" ] -} - -@test "server/ingress: annotations added to object - yaml" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-ingress.yaml \ - --set 'server.ingress.enabled=true' \ - --set server.ingress.annotations."kubernetes\.io/ingress\.class"=nginx \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["kubernetes.io/ingress.class"]' | tee /dev/stderr) - [ "${actual}" = "nginx" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-service.bats b/charts/vkpr/vault-helm/test/unit/server-service.bats deleted file mode 100755 index 5821b91e..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-service.bats +++ /dev/null @@ -1,412 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/Service: service enabled by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/Service: disable with global.enabled false" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'global.enabled=false' \ - --set 'server.service.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'global.enabled=false' \ - --set 'server.service.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'global.enabled=false' \ - --set 'server.service.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/Service: disable with server.service.enabled false" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.service.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.service.enabled=false' \ - . 
|| echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.service.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/Service: disable with global.enabled false server.service.enabled false" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'global.enabled=false' \ - --set 'server.service.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'global.enabled=false' \ - --set 'server.service.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'global.enabled=false' \ - --set 'server.service.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/Service: disable with injector.externalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - --set 'server.service.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - --set 'server.service.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - --set 'server.service.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -# This can be seen as testing just what we put into the YAML raw, but -# this is such an important part of making everything work we verify it here. -@test "server/Service: tolerates unready endpoints" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["service.alpha.kubernetes.io/tolerate-unready-endpoints"]' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["service.alpha.kubernetes.io/tolerate-unready-endpoints"]' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.metadata.annotations["service.alpha.kubernetes.io/tolerate-unready-endpoints"]' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/Service: generic annotations" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.service.annotations=vaultIsAwesome: true' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["vaultIsAwesome"]' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/Service: publish not ready" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.publishNotReadyAddresses' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.publishNotReadyAddresses' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.publishNotReadyAddresses' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/Service: type empty by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/Service: type can set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.service.type=NodePort' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "NodePort" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.service.type=NodePort' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "NodePort" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.service.type=NodePort' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "NodePort" ] -} - -@test "server/Service: clusterIP empty by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.clusterIP' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.clusterIP' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - . 
| tee /dev/stderr | - yq -r '.spec.clusterIP' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/Service: clusterIP can set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.service.clusterIP=None' \ - . | tee /dev/stderr | - yq -r '.spec.clusterIP' | tee /dev/stderr) - [ "${actual}" = "None" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.service.clusterIP=None' \ - . | tee /dev/stderr | - yq -r '.spec.clusterIP' | tee /dev/stderr) - [ "${actual}" = "None" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.service.clusterIP=None' \ - . | tee /dev/stderr | - yq -r '.spec.clusterIP' | tee /dev/stderr) - [ "${actual}" = "None" ] -} - -@test "server/Service: port and targetPort will be 8200 by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].port' | tee /dev/stderr) - [ "${actual}" = "8200" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].targetPort' | tee /dev/stderr) - [ "${actual}" = "8200" ] -} - -@test "server/Service: port and targetPort can be set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.service.port=8000' \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].port' | tee /dev/stderr) - [ "${actual}" = "8000" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.service.targetPort=80' \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].targetPort' | tee /dev/stderr) - [ "${actual}" = "80" ] -} - -@test "server/Service: nodeport can set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.service.type=NodePort' \ - --set 'server.service.nodePort=30008' \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) - [ "${actual}" = "30008" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.service.type=NodePort' \ - --set 'server.service.nodePort=30009' \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) - [ "${actual}" = "30009" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.service.type=NodePort' \ - --set 'server.service.nodePort=30010' \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) - [ "${actual}" = "30010" ] -} - -@test "server/Service: nodeport can't set when type isn't NodePort" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.service.nodePort=30008' \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.service.nodePort=30009' \ - . 
| tee /dev/stderr | - yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.service.nodePort=30010' \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].nodePort' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/Service: vault port name is http, when tlsDisable is true" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'global.tlsDisable=true' \ - . | tee /dev/stderr | - yq -r '.spec.ports | map(select(.port==8200)) | .[] .name' | tee /dev/stderr) - [ "${actual}" = "http" ] -} - -@test "server/Service: vault port name is https, when tlsDisable is false" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-service.yaml \ - --set 'global.tlsDisable=false' \ - . | tee /dev/stderr | - yq -r '.spec.ports | map(select(.port==8200)) | .[] .name' | tee /dev/stderr) - [ "${actual}" = "https" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-serviceaccount.bats b/charts/vkpr/vault-helm/test/unit/server-serviceaccount.bats deleted file mode 100755 index fe09c2ad..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-serviceaccount.bats +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/ServiceAccount: specify annotations" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-serviceaccount.yaml \ - --set 'server.dev.enabled=true' \ - --set 'server.serviceAccount.annotations=foo: bar' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["foo"]' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-serviceaccount.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.serviceAccount.annotations=foo: bar' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["foo"]' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(helm template \ - --show-only templates/server-serviceaccount.yaml \ - --set 'server.ha.enabled=true' \ - --set 'server.serviceAccount.annotations.foo=bar' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["foo"]' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(helm template \ - --show-only templates/server-serviceaccount.yaml \ - --set 'server.ha.enabled=true' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["foo"]' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/ServiceAccount: disable with global.enabled false" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'global.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'global.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'global.enabled=false' \ - . 
|| echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/ServiceAccount: disable by injector.externalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/server-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/server-statefulset.bats b/charts/vkpr/vault-helm/test/unit/server-statefulset.bats deleted file mode 100755 index 3fa7ba46..00000000 --- a/charts/vkpr/vault-helm/test/unit/server-statefulset.bats +++ /dev/null @@ -1,982 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "server/standalone-StatefulSet: default server.standalone.enabled" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/standalone-StatefulSet: enable with server.standalone.enabled true" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - . | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/standalone-StatefulSet: disable with global.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.enabled=false' \ - --set 'server.standalone.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/standalone-StatefulSet: disable with injector.externalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - --set 'server.standalone.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "server/standalone-StatefulSet: image defaults to server.image.repository:tag" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.image.repository=foo' \ - --set 'server.image.tag=1.2.3' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:1.2.3" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.image.repository=foo' \ - --set 'server.image.tag=1.2.3' \ - --set 'server.standalone.enabled=true' \ - . 
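The deleted suites live under `test/unit/` in the vendored chart and are plain bats files, so before a removal like this one they could be run file by file. A hedged sketch, assuming `bats-core`, helm 3 and `yq` are installed locally and the vendored chart still exists at this path:

```sh
# Run a single unit-test suite on its own; each .bats file pulls in the
# shared helpers itself via `load _helpers`.
cd charts/vkpr/vault-helm
bats test/unit/server-statefulset.bats
```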
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:1.2.3" ] -} - -@test "server/standalone-StatefulSet: image tag defaults to latest" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.image.repository=foo' \ - --set 'server.image.tag=' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:latest" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.image.repository=foo' \ - --set 'server.image.tag=' \ - --set 'server.standalone.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].image' | tee /dev/stderr) - [ "${actual}" = "foo:latest" ] -} - -@test "server/standalone-StatefulSet: default imagePullPolicy" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].imagePullPolicy' | tee /dev/stderr) - [ "${actual}" = "IfNotPresent" ] -} - -@test "server/standalone-StatefulSet: Custom imagePullPolicy" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.image.pullPolicy=Always' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].imagePullPolicy' | tee /dev/stderr) - [ "${actual}" = "Always" ] -} - -@test "server/standalone-StatefulSet: Custom imagePullSecrets" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.imagePullSecrets[0].name=foo' \ - --set 'global.imagePullSecrets[1].name=bar' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.imagePullSecrets' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[0].name' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(echo $object | - yq -r '.[1].name' | tee /dev/stderr) - [ "${actual}" = "bar" ] -} - -@test "server/standalone-StatefulSet: default imagePullSecrets" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.imagePullSecrets' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -#-------------------------------------------------------------------- -# updateStrategy - -@test "server/standalone-StatefulSet: OnDelete updateStrategy" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.updateStrategy.type' | tee /dev/stderr) - [ "${actual}" = "OnDelete" ] -} - -#-------------------------------------------------------------------- -# replicas - -@test "server/standalone-StatefulSet: default replicas" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.replicas' | tee /dev/stderr) - [ "${actual}" = "1" ] -} - -@test "server/standalone-StatefulSet: custom replicas" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.replicas=100' \ - . | tee /dev/stderr | - yq -r '.spec.replicas' | tee /dev/stderr) - [ "${actual}" = "1" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.standalone.replicas=100' \ - . 
| tee /dev/stderr | - yq -r '.spec.replicas' | tee /dev/stderr) - [ "${actual}" = "1" ] -} - -#-------------------------------------------------------------------- -# resources - -@test "server/standalone-StatefulSet: default resources" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/standalone-StatefulSet: custom resources" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.resources.requests.memory=256Mi' \ - --set 'server.resources.requests.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.requests.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.resources.limits.memory=256Mi' \ - --set 'server.resources.limits.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.limits.memory' | tee /dev/stderr) - [ "${actual}" = "256Mi" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.resources.requests.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.requests.cpu' | tee /dev/stderr) - [ "${actual}" = "250m" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.resources.limits.cpu=250m' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].resources.limits.cpu' | tee /dev/stderr) - [ "${actual}" = "250m" ] -} - -#-------------------------------------------------------------------- -# extraVolumes - -@test "server/standalone-StatefulSet: adds extra volume" { - cd `chart_dir` - - # Test that it defines it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.configMap.name' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(echo $object | - yq -r '.configMap.secretName' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.configMap.name' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(echo $object | - yq -r '.configMap.secretName' | tee /dev/stderr) - [ "${actual}" = "null" ] - - # Test that it mounts it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/vault/userconfig/foo" ] - - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/vault/userconfig/foo" ] -} - -@test "server/standalone-StatefulSet: adds extra secret volume" { - cd `chart_dir` - - # Test that it defines it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraVolumes[0].type=secret' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.secret.name' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(echo $object | - yq -r '.secret.secretName' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.extraVolumes[0].type=secret' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.volumes[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.secret.name' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(echo $object | - yq -r '.secret.secretName' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - # Test that it mounts it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/vault/userconfig/foo" ] - - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.extraVolumes[0].type=configMap' \ - --set 'server.extraVolumes[0].name=foo' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "userconfig-foo")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.readOnly' | tee /dev/stderr) - [ "${actual}" = "true" ] - - local actual=$(echo $object | - yq -r '.mountPath' | tee /dev/stderr) - [ "${actual}" = "/vault/userconfig/foo" ] -} - -@test "server/standalone-StatefulSet: can mount audit" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.auditStorage.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].volumeMounts[] | select(.name == "audit")' | tee /dev/stderr) -} - -#-------------------------------------------------------------------- -# extraEnvironmentVars - -@test "server/standalone-StatefulSet: set extraEnvironmentVars" { - cd `chart_dir` - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.stanadlone.enabled=true' \ - --set 'server.extraEnvironmentVars.FOO=bar' \ - --set 'server.extraEnvironmentVars.FOOBAR=foobar' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[10].name' | tee /dev/stderr) - [ "${actual}" = "FOO" ] - - local actual=$(echo $object | - yq -r '.[10].value' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(echo $object | - yq -r '.[11].name' | tee /dev/stderr) - [ "${actual}" = "FOOBAR" ] - - local actual=$(echo $object | - yq -r '.[11].value' | tee /dev/stderr) - [ "${actual}" = "foobar" ] - - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraEnvironmentVars.FOO=bar' \ - --set 'server.extraEnvironmentVars.FOOBAR=foobar' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].env' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.[10].name' | tee /dev/stderr) - [ "${actual}" = "FOO" ] - - local actual=$(echo $object | - yq -r '.[10].value' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(echo $object | - yq -r '.[11].name' | tee /dev/stderr) - [ "${actual}" = "FOOBAR" ] - - local actual=$(echo $object | - yq -r '.[11].value' | tee /dev/stderr) - [ "${actual}" = "foobar" ] -} - -#-------------------------------------------------------------------- -# storage class - -@test "server/standalone-StatefulSet: storageClass on claim by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates[0].spec.storageClassName' | tee /dev/stderr) - [ "${actual}" = "null" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates[0].spec.storageClassName' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - - -@test "server/standalone-StatefulSet: can set storageClass" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.dataStorage.enabled=true' \ - --set 'server.dataStorage.storageClass=foo' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates[0].spec.storageClassName' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.dataStorage.enabled=false' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.auditStorage.storageClass=foo' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates[0].spec.storageClassName' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.auditStorage.storageClass=foo' \ - . 
| tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates[1].spec.storageClassName' | tee /dev/stderr) - [ "${actual}" = "foo" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.dataStorage.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "2" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.dataStorage.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "2" ] -} - -@test "server/standalone-StatefulSet: can disable storage" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.auditStorage.enabled=false' \ - --set 'server.dataStorage.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "1" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.dataStorage.enabled=false' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "1" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.auditStorage.enabled=false' \ - --set 'server.dataStorage.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "1" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.dataStorage.enabled=false' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "1" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.dataStorage.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "2" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.auditStorage.enabled=true' \ - --set 'server.dataStorage.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "2" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.auditStorage.enabled=fa;se' \ - --set 'server.dataStorage.enabled=false' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "0" ] - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'server.auditStorage.enabled=false' \ - --set 'server.dataStorage.enabled=false' \ - . | tee /dev/stderr | - yq -r '.spec.volumeClaimTemplates | length' | tee /dev/stderr) - [ "${actual}" = "0" ] -} - -@test "server/standalone-StatefulSet: affinity is set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq '.spec.template.spec.affinity["podAntiAffinity"]? 
!= null' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/standalone-StatefulSet: affinity can be set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.affinity=foobar' \ - . | tee /dev/stderr | - yq '.spec.template.spec.affinity == "foobar"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/standalone-StatefulSet: tolerations not set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq '.spec.template.spec | .tolerations? == null' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/standalone-StatefulSet: tolerations can be set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.tolerations=foobar' \ - . | tee /dev/stderr | - yq '.spec.template.spec.tolerations == "foobar"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/standalone-StatefulSet: nodeSelector is not set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq '.spec.template.spec.nodeSelector' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/standalone-StatefulSet: specified nodeSelector" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.nodeSelector=testing' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.nodeSelector' | tee /dev/stderr) - [ "${actual}" = "testing" ] -} - -#-------------------------------------------------------------------- -# extraContainers - -@test "server/standalone-StatefulSet: adds extra containers" { - cd `chart_dir` - - # Test that it defines it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraContainers[0].image=test-image' \ - --set 'server.extraContainers[0].name=test-container' \ - --set 'server.extraContainers[0].ports[0].name=test-port' \ - --set 'server.extraContainers[0].ports[0].containerPort=9410' \ - --set 'server.extraContainers[0].ports[0].protocol=TCP' \ - --set 'server.extraContainers[0].env[0].name=TEST_ENV' \ - --set 'server.extraContainers[0].env[0].value=test_env_value' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[] | select(.name == "test-container")' | tee /dev/stderr) - - local actual=$(echo $object | - yq -r '.name' | tee /dev/stderr) - [ "${actual}" = "test-container" ] - - local actual=$(echo $object | - yq -r '.image' | tee /dev/stderr) - [ "${actual}" = "test-image" ] - - local actual=$(echo $object | - yq -r '.ports[0].name' | tee /dev/stderr) - [ "${actual}" = "test-port" ] - - local actual=$(echo $object | - yq -r '.ports[0].containerPort' | tee /dev/stderr) - [ "${actual}" = "9410" ] - - local actual=$(echo $object | - yq -r '.ports[0].protocol' | tee /dev/stderr) - [ "${actual}" = "TCP" ] - - local actual=$(echo $object | - yq -r '.env[0].name' | tee /dev/stderr) - [ "${actual}" = "TEST_ENV" ] - - local actual=$(echo $object | - yq -r '.env[0].value' | tee /dev/stderr) - [ "${actual}" = "test_env_value" ] - -} - -@test "server/standalone-StatefulSet: add two extra containers" { - cd `chart_dir` - - # Test that it defines it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraContainers[0].image=test-image' \ - --set 'server.extraContainers[0].name=test-container' \ - --set 'server.extraContainers[1].image=test-image' \ - --set 'server.extraContainers[1].name=test-container-2' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers' | tee /dev/stderr) - - local containers_count=$(echo $object | - yq -r 'length' | tee /dev/stderr) - [ "${containers_count}" = 3 ] - -} - -@test "server/standalone-StatefulSet: no extra containers added" { - cd `chart_dir` - - # Test that it defines it - local object=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers' | tee /dev/stderr) - - local containers_count=$(echo $object | - yq -r 'length' | tee /dev/stderr) - [ "${containers_count}" = 1 ] -} - -# sharedProcessNamespace - -@test "server/standalone-StatefulSet: shareProcessNamespace disabled by default" { - cd `chart_dir` - - # Test that it defines it - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.shareProcessNamespace' | tee /dev/stderr) - - [ "${actual}" = "null" ] -} - -@test "server/standalone-StatefulSet: shareProcessNamespace enabled" { - cd `chart_dir` - - # Test that it defines it - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.shareProcessNamespace=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.shareProcessNamespace' | tee /dev/stderr) - - [ "${actual}" = "true" ] -} - -# extra labels - -@test "server/standalone-StatefulSet: specify extraLabels" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraLabels.foo=bar' \ - . | tee /dev/stderr | - yq -r '.spec.template.metadata.labels.foo' | tee /dev/stderr) - [ "${actual}" = "bar" ] -} - - -#-------------------------------------------------------------------- -# Security Contexts -@test "server/standalone-StatefulSet: uid default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsUser' | tee /dev/stderr) - [ "${actual}" = "100" ] -} - -@test "server/standalone-StatefulSet: uid configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.uid=2000' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsUser' | tee /dev/stderr) - [ "${actual}" = "2000" ] -} - -@test "server/standalone-StatefulSet: gid default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsGroup' | tee /dev/stderr) - [ "${actual}" = "1000" ] -} - -@test "server/standalone-StatefulSet: gid configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.gid=2000' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.runAsGroup' | tee /dev/stderr) - [ "${actual}" = "2000" ] -} - -@test "server/standalone-StatefulSet: fsgroup default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.fsGroup' | tee /dev/stderr) - [ "${actual}" = "1000" ] -} - -@test "server/standalone-StatefulSet: fsgroup configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.gid=2000' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.securityContext.fsGroup' | tee /dev/stderr) - [ "${actual}" = "2000" ] -} - -#-------------------------------------------------------------------- -# health checks - -@test "server/standalone-StatefulSet: readinessProbe default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].readinessProbe.exec.command[2]' | tee /dev/stderr) - [ "${actual}" = "vault status -tls-skip-verify" ] -} - -@test "server/standalone-StatefulSet: readinessProbe configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.readinessProbe.enabled=false' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].readinessProbe' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - - -@test "server/standalone-StatefulSet: livenessProbe default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].livenessProbe' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "server/standalone-StatefulSet: livenessProbe configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.livenessProbe.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].livenessProbe.httpGet.path' | tee /dev/stderr) - [ "${actual}" = "/v1/sys/health?standbyok=true" ] -} - -@test "server/standalone-StatefulSet: livenessProbe initialDelaySeconds default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.livenessProbe.enabled=true' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].livenessProbe.initialDelaySeconds' | tee /dev/stderr) - [ "${actual}" = "60" ] -} - -@test "server/standalone-StatefulSet: livenessProbe initialDelaySeconds configurable" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.livenessProbe.enabled=true' \ - --set 'server.livenessProbe.initialDelaySeconds=30' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].livenessProbe.initialDelaySeconds' | tee /dev/stderr) - [ "${actual}" = "30" ] -} - -@test "server/standalone-StatefulSet: add extraArgs" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.extraArgs=foobar' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].args[0]' | tee /dev/stderr) - [[ "${actual}" = *"foobar"* ]] -} - -#-------------------------------------------------------------------- -# preStop -@test "server/standalone-StatefulSet: preStop sleep duration default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]' | tee /dev/stderr) - [[ "${actual}" = "sleep 5 &&"* ]] -} - -@test "server/standalone-StatefulSet: preStop sleep duration 10" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.preStopSleepSeconds=10' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].lifecycle.preStop.exec.command[2]' | tee /dev/stderr) - [[ "${actual}" = "sleep 10 &&"* ]] -} - -@test "server/standalone-StatefulSet: vault port name is http, when tlsDisable is true" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.tlsDisable=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].ports | map(select(.containerPort==8200)) | .[] .name' | tee /dev/stderr) - [ "${actual}" = "http" ] -} - -@test "server/standalone-StatefulSet: vault replication port name is http-rep, when tlsDisable is true" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.tlsDisable=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].ports | map(select(.containerPort==8202)) | .[] .name' | tee /dev/stderr) - [ "${actual}" = "http-rep" ] -} - -@test "server/standalone-StatefulSet: vault port name is https, when tlsDisable is false" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.tlsDisable=false' \ - . | tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].ports | map(select(.containerPort==8200)) | .[] .name' | tee /dev/stderr) - [ "${actual}" = "https" ] -} - -@test "server/standalone-StatefulSet: vault replication port name is https-rep, when tlsDisable is false" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'global.tlsDisable=false' \ - . 
| tee /dev/stderr | - yq -r '.spec.template.spec.containers[0].ports | map(select(.containerPort==8202)) | .[] .name' | tee /dev/stderr) - [ "${actual}" = "https-rep" ] -} - -#-------------------------------------------------------------------- -# annotations -@test "server/standalone-StatefulSet: generic annotations string" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.annotations=vaultIsAwesome: true' \ - . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations["vaultIsAwesome"]' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/ha-standby-Service: generic annotations yaml" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.annotations.vaultIsAwesome=true' \ - . | tee /dev/stderr | - yq -r '.spec.template.metadata.annotations["vaultIsAwesome"]' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -#-------------------------------------------------------------------- -# priorityClassName - -@test "server/standalone-StatefulSet: priorityClassName not set by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - . | tee /dev/stderr | - yq '.spec.template.spec | .priorityClassName? == null' | tee /dev/stderr) - [ "${actual}" = "true" ] -} - -@test "server/standalone-StatefulSet: priorityClassName can be set" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/server-statefulset.yaml \ - --set 'server.priorityClassName=armaggeddon' \ - . | tee /dev/stderr | - yq '.spec.template.spec | .priorityClassName == "armaggeddon"' | tee /dev/stderr) - [ "${actual}" = "true" ] -} diff --git a/charts/vkpr/vault-helm/test/unit/ui-service.bats b/charts/vkpr/vault-helm/test/unit/ui-service.bats deleted file mode 100755 index b92160b8..00000000 --- a/charts/vkpr/vault-helm/test/unit/ui-service.bats +++ /dev/null @@ -1,250 +0,0 @@ -#!/usr/bin/env bats - -load _helpers - -@test "ui/Service: disabled by default" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.ha.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.standalone.enabled=true' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "ui/Service: disable with ui.enabled" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'ui.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'ui.enabled=false' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'ui.enabled=false' \ - . 
|| echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "ui/Service: disable with injector.externalVaultAddr" { - cd `chart_dir` - local actual=$( (helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] - - local actual=$( (helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'injector.externalVaultAddr=http://vault-outside' \ - . || echo "---") | tee /dev/stderr | - yq 'length > 0' | tee /dev/stderr) - [ "${actual}" = "false" ] -} - -@test "ui/Service: ClusterIP type by default" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'ui.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "ClusterIP" ] - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'ui.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "ClusterIP" ] - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'ui.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "ClusterIP" ] -} - -@test "ui/Service: specified type" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'ui.serviceType=LoadBalancer' \ - --set 'ui.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "LoadBalancer" ] - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'ui.serviceType=LoadBalancer' \ - --set 'ui.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "LoadBalancer" ] - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.standalone.enabled=true' \ - --set 'ui.serviceType=LoadBalancer' \ - --set 'ui.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.type' | tee /dev/stderr) - [ "${actual}" = "LoadBalancer" ] -} - -@test "ui/Service: LoadBalancerIP set if specified and serviceType == LoadBalancer" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'ui.serviceType=LoadBalancer' \ - --set 'ui.enabled=true' \ - --set 'ui.loadBalancerIP=123.123.123.123' \ - . | tee /dev/stderr | - yq -r '.spec.loadBalancerIP' | tee /dev/stderr) - [ "${actual}" = "123.123.123.123" ] - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'ui.serviceType=ClusterIP' \ - --set 'ui.enabled=true' \ - --set 'ui.loadBalancerIP=123.123.123.123' \ - . 
| tee /dev/stderr | - yq -r '.spec.loadBalancerIP' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "ui/Service: set loadBalancerSourceRanges when LoadBalancer is configured as serviceType" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'ui.serviceType=LoadBalancer' \ - --set 'ui.enabled=true' \ - --set 'ui.loadBalancerSourceRanges={"123.123.123.123"}' \ - . | tee /dev/stderr | - yq -r '.spec.loadBalancerSourceRanges[0]' | tee /dev/stderr) - [ "${actual}" = "123.123.123.123" ] - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'ui.serviceType=ClusterIP' \ - --set 'ui.enabled=true' \ - --set 'ui.loadBalancerSourceRanges={"123.123.123.123"}' \ - . | tee /dev/stderr | - yq -r '.spec.loadBalancerSourceRanges[0]' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "ui/Service: specify annotations" { - cd `chart_dir` - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.dev.enabled=true' \ - --set 'ui.serviceType=LoadBalancer' \ - --set 'ui.enabled=true' \ - --set 'ui.annotations=foo: bar' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["foo"]' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'ui.serviceType=LoadBalancer' \ - --set 'ui.enabled=true' \ - --set 'ui.annotations=foo: bar' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["foo"]' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'ui.serviceType=LoadBalancer' \ - --set 'ui.enabled=true' \ - --set 'ui.annotations.foo=bar' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["foo"]' | tee /dev/stderr) - [ "${actual}" = "bar" ] - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'server.ha.enabled=true' \ - --set 'ui.serviceType=LoadBalancer' \ - --set 'ui.enabled=true' \ - . | tee /dev/stderr | - yq -r '.metadata.annotations["foo"]' | tee /dev/stderr) - [ "${actual}" = "null" ] -} - -@test "ui/Service: port name is http, when tlsDisable is true" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'global.tlsDisable=true' \ - --set 'ui.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].name' | tee /dev/stderr) - [ "${actual}" = "http" ] -} - -@test "ui/Service: port name is https, when tlsDisable is false" { - cd `chart_dir` - - local actual=$(helm template \ - --show-only templates/ui-service.yaml \ - --set 'global.tlsDisable=false' \ - --set 'ui.enabled=true' \ - . | tee /dev/stderr | - yq -r '.spec.ports[0].name' | tee /dev/stderr) - [ "${actual}" = "https" ] -} diff --git a/charts/vkpr/vault-helm/values.yaml b/charts/vkpr/vault-helm/values.yaml deleted file mode 100644 index 2385dcce..00000000 --- a/charts/vkpr/vault-helm/values.yaml +++ /dev/null @@ -1,441 +0,0 @@ -# Available parameters and their default values for the Vault chart. - -global: - # enabled is the master enabled switch. Setting this to true or false - # will enable or disable all the components within this chart by default. - enabled: true - # Image pull secret to use for registry authentication. 
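The UI Service tests above exercise `ui.serviceType`, `ui.loadBalancerIP` and `ui.loadBalancerSourceRanges`. A hedged sketch of the matching override at render time, run from the vendored vault-helm chart directory; the CIDR block is illustrative, not a value taken from the chart:

```sh
# Expose the Vault UI through a LoadBalancer restricted to one CIDR block
# and print the rendered type and first allowed range.
helm template . \
  --show-only templates/ui-service.yaml \
  --set 'server.dev.enabled=true' \
  --set 'ui.enabled=true' \
  --set 'ui.serviceType=LoadBalancer' \
  --set 'ui.loadBalancerSourceRanges={10.0.0.0/16}' \
  | yq -r '.spec.type, .spec.loadBalancerSourceRanges[0]'
```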
- imagePullSecrets: [] - # imagePullSecrets: - # - name: image-pull-secret - # TLS for end-to-end encrypted transport - tlsDisable: true - -injector: - # True if you want to enable vault agent injection. - enabled: true - - # External vault server address for the injector to use. Setting this will - # disable deployment of a vault server along with the injector. - externalVaultAddr: "" - - # image sets the repo and tag of the vault-k8s image to use for the injector. - image: - repository: "hashicorp/vault-k8s" - tag: "0.3.0" - pullPolicy: IfNotPresent - - # agentImage sets the repo and tag of the Vault image to use for the Vault Agent - # containers. This should be set to the official Vault image. Vault 1.3.1+ is - # required. - agentImage: - repository: "vault" - tag: "1.4.0" - - # Mount Path of the Vault Kubernetes Auth Method. - authPath: "auth/kubernetes" - - # Configures the log verbosity of the injector. Supported log levels: Trace, Debug, Error, Warn, Info - logLevel: "info" - - # Configures the log format of the injector. Supported log formats: "standard", "json". - logFormat: "standard" - - # Configures all Vault Agent sidecars to revoke their token when shutting down - revokeOnShutdown: false - - # namespaceSelector is the selector for restricting the webhook to only - # specific namespaces. - # See https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-namespaceselector - # for more details. - # Example: - # namespaceSelector: - # matchLabels: - # sidecar-injector: enabled - namespaceSelector: {} - - certs: - # secretName is the name of the secret that has the TLS certificate and - # private key to serve the injector webhook. If this is null, then the - # injector will default to its automatic management mode that will assign - # a service account to the injector to generate its own certificates. - secretName: null - - # caBundle is a base64-encoded PEM-encoded certificate bundle for the - # CA that signed the TLS certificate that the webhook serves. This must - # be set if secretName is non-null. - caBundle: "" - - # certName and keyName are the names of the files within the secret for - # the TLS cert and private key, respectively. These have reasonable - # defaults but can be customized if necessary. - certName: tls.crt - keyName: tls.key - - resources: {} - # resources: - # requests: - # memory: 256Mi - # cpu: 250m - # limits: - # memory: 256Mi - # cpu: 250m - - # extraEnvironmentVars is a list of extra enviroment variables to set in the - # injector deployment. - extraEnvironmentVars: {} - # KUBERNETES_SERVICE_HOST: kubernetes.default.svc - - # Affinity Settings for injector pods - # This should be a multi-line string matching the affinity section of a - # PodSpec. - affinity: null - - # Toleration Settings for injector pods - # This should be a multi-line string matching the Toleration array - # in a PodSpec. - tolerations: null - - # nodeSelector labels for injector pod assignment, formatted as a muli-line string. - # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - # Example: - # nodeSelector: | - # beta.kubernetes.io/arch: amd64 - nodeSelector: null - - # Priority class for injector pods - priorityClassName: "" - -server: - # Resource requests, limits, etc. for the server cluster placement. This - # should map directly to the value of the resources field for a PodSpec. - # By default no direct resource request is made. 
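As the injector block above documents, setting `injector.externalVaultAddr` deploys only the agent injector and points it at a Vault server running elsewhere; the deleted tests confirm that the server Service, ServiceAccount and StatefulSet are then skipped. A hedged sketch of such an install, where the release name and address are illustrative placeholders:

```sh
# Deploy only the agent injector, pointed at an externally managed Vault.
helm upgrade -i vault-injector . \
  --set 'injector.externalVaultAddr=http://vault.example.internal:8200'
```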
- - image: - repository: "vault" - tag: "1.4.0" - # Overrides the default Image Pull Policy - pullPolicy: IfNotPresent - - # Configure the Update Strategy Type for the StatefulSet - # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies - updateStrategyType: "OnDelete" - - resources: - # resources: - # requests: - # memory: 256Mi - # cpu: 250m - # limits: - # memory: 256Mi - # cpu: 250m - - # Ingress allows ingress services to be created to allow external access - # from Kubernetes to access Vault pods. - ingress: - enabled: false - labels: {} - # traffic: external - annotations: {} - # | - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # or - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - hosts: - - host: chart-example.local - paths: [] - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - - - # authDelegator enables a cluster role binding to be attached to the service - # account. This cluster role binding can be used to setup Kubernetes auth - # method. https://www.vaultproject.io/docs/auth/kubernetes.html - authDelegator: - enabled: true - - # extraContainers is a list of sidecar containers. Specified as a raw YAML string. - extraContainers: null - - # shareProcessNamespace enables process namespace sharing between Vault and the extraContainers - # This is useful if Vault must be signaled, e.g. to send a SIGHUP for log rotation - shareProcessNamespace: false - - # extraArgs is a string containing additional Vault server arguments. - extraArgs: "" - - # Used to define custom readinessProbe settings - readinessProbe: - enabled: true - # If you need to use a http path instead of the default exec - # path: /v1/sys/health?standbyok=true - # Used to enable a livenessProbe for the pods - livenessProbe: - enabled: false - path: "/v1/sys/health?standbyok=true" - initialDelaySeconds: 60 - - # Used to set the sleep time during the preStop step - preStopSleepSeconds: 5 - - # extraEnvironmentVars is a list of extra enviroment variables to set with the stateful set. These could be - # used to include variables required for auto-unseal. - extraEnvironmentVars: {} - # GOOGLE_REGION: global - # GOOGLE_PROJECT: myproject - # GOOGLE_APPLICATION_CREDENTIALS: /vault/userconfig/myproject/myproject-creds.json - - # extraSecretEnvironmentVars is a list of extra enviroment variables to set with the stateful set. - # These variables take value from existing Secret objects. - extraSecretEnvironmentVars: [] - # - envName: AWS_SECRET_ACCESS_KEY - # secretName: vault - # secretKey: AWS_SECRET_ACCESS_KEY - - # extraVolumes is a list of extra volumes to mount. These will be exposed - # to Vault in the path `/vault/userconfig//`. The value below is - # an array of objects, examples are shown below. - extraVolumes: [] - # - type: secret (or "configMap") - # name: my-secret - # path: null # default is `/vault/userconfig` - - # Affinity Settings - # Commenting out or setting as empty the affinity variable, will allow - # deployment to single node services such as Minikube - affinity: | - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app.kubernetes.io/name: {{ template "vault.name" . }} - app.kubernetes.io/instance: "{{ .Release.Name }}" - component: server - topologyKey: kubernetes.io/hostname - - # Toleration Settings for server pods - # This should be a multi-line string matching the Toleration array - # in a PodSpec. 
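The server block above ships with `resources` empty and the liveness probe disabled. A hedged sketch of overriding both at render time, reusing the same request/limit and delay values the deleted StatefulSet tests assert on:

```sh
# Give the Vault server explicit requests/limits and enable the HTTP
# liveness probe documented above, then print the rendered resources.
helm template . \
  --show-only templates/server-statefulset.yaml \
  --set 'server.resources.requests.memory=256Mi' \
  --set 'server.resources.requests.cpu=250m' \
  --set 'server.resources.limits.memory=256Mi' \
  --set 'server.resources.limits.cpu=250m' \
  --set 'server.livenessProbe.enabled=true' \
  --set 'server.livenessProbe.initialDelaySeconds=30' \
  | yq -r '.spec.template.spec.containers[0].resources'
```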
- tolerations: {} - - # nodeSelector labels for server pod assignment, formatted as a muli-line string. - # ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector - # Example: - # nodeSelector: | - # beta.kubernetes.io/arch: amd64 - nodeSelector: {} - - # Priority class for server pods - priorityClassName: "" - - # Extra labels to attach to the server pods - # This should be a multi-line string mapping directly to the a map of - # the labels to apply to the server pods - extraLabels: {} - - # Extra annotations to attach to the server pods - # This can either be YAML or a YAML-formatted multi-line templated string map - # of the annotations to apply to the server pods - annotations: {} - - # Enables a headless service to be used by the Vault Statefulset - service: - enabled: true - # clusterIP controls whether a Cluster IP address is attached to the - # Vault service within Kubernetes. By default the Vault service will - # be given a Cluster IP address, set to None to disable. When disabled - # Kubernetes will create a "headless" service. Headless services can be - # used to communicate with pods directly through DNS instead of a round robin - # load balancer. - # clusterIP: None - - # Configures the service type for the main Vault service. Can be ClusterIP - # or NodePort. - #type: ClusterIP - - # If type is set to "NodePort", a specific nodePort value can be configured, - # will be random if left blank. - #nodePort: 30000 - - # Port on which Vault server is listening - port: 8200 - # Target port to which the service should be mapped to - targetPort: 8200 - # Extra annotations for the service definition. This can either be YAML or a - # YAML-formatted multi-line templated string map of the annotations to apply - # to the service. - annotations: {} - - # This configures the Vault Statefulset to create a PVC for data - # storage when using the file or raft backend storage engines. - # See https://www.vaultproject.io/docs/configuration/storage/index.html to know more - dataStorage: - enabled: true - # Size of the PVC created - size: 10Gi - # Name of the storage class to use. If null it will use the - # configured default Storage Class. - storageClass: null - # Access Mode of the storage device being used for the PVC - accessMode: ReadWriteOnce - - # This configures the Vault Statefulset to create a PVC for audit - # logs. Once Vault is deployed, initialized and unseal, Vault must - # be configured to use this for audit logs. This will be mounted to - # /vault/audit - # See https://www.vaultproject.io/docs/audit/index.html to know more - auditStorage: - enabled: false - # Size of the PVC created - size: 10Gi - # Name of the storage class to use. If null it will use the - # configured default Storage Class. - storageClass: null - # Access Mode of the storage device being used for the PVC - accessMode: ReadWriteOnce - - # Run Vault in "dev" mode. This requires no further setup, no state management, - # and no initialization. This is useful for experimenting with Vault without - # needing to unseal, store keys, et. al. All data is lost on restart - do not - # use dev mode for anything other than experimenting. - # See https://www.vaultproject.io/docs/concepts/dev-server.html to know more - dev: - enabled: false - - # Run Vault in "standalone" mode. This is the default mode that will deploy if - # no arguments are given to helm. This requires a PVC for data storage to use - # the "file" backend. 
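The `server.service` block above accepts the same `type` and `nodePort` overrides the deleted Service tests exercise. A hedged sketch, again run from the vendored chart directory; it should print the fixed NodePort (30008):

```sh
# Expose the Vault API Service on a fixed NodePort and show the result.
helm template . \
  --show-only templates/server-service.yaml \
  --set 'server.service.type=NodePort' \
  --set 'server.service.nodePort=30008' \
  | yq -r '.spec.ports[0].nodePort'
```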
This mode is not highly available and should not be scaled - # past a single replica. - standalone: - enabled: "-" - - # config is a raw string of default configuration when using a Stateful - # deployment. Default is to use a PersistentVolumeClaim mounted at /vault/data - # and store data there. This is only used when using a Replica count of 1, and - # using a stateful set. This should be HCL. - config: | - ui = true - - listener "tcp" { - tls_disable = 1 - address = "[::]:8200" - cluster_address = "[::]:8201" - } - storage "file" { - path = "/vault/data" - } - - # Example configuration for using auto-unseal, using Google Cloud KMS. The - # GKMS keys must already exist, and the cluster must have a service account - # that is authorized to access GCP KMS. - #seal "gcpckms" { - # project = "vault-helm-dev" - # region = "global" - # key_ring = "vault-helm-unseal-kr" - # crypto_key = "vault-helm-unseal-key" - #} - - # Run Vault in "HA" mode. There are no storage requirements unless audit log - # persistence is required. In HA mode Vault will configure itself to use Consul - # for its storage backend. The default configuration provided will work the Consul - # Helm project by default. It is possible to manually configure Vault to use a - # different HA backend. - ha: - enabled: false - replicas: 3 - - # Enables Vault's integrated Raft storage. Unlike the typical HA modes where - # Vault's persistence is external (such as Consul), enabling Raft mode will create - # persistent volumes for Vault to store data according to the configuration under server.dataStorage. - # The Vault cluster will coordinate leader elections and failovers internally. - raft: - - # Enables Raft integrated storage - enabled: false - # Set the Node Raft ID to the name of the pod - setNodeId: false - config: | - ui = true - - listener "tcp" { - tls_disable = 1 - address = "[::]:8200" - cluster_address = "[::]:8201" - } - - storage "raft" { - path = "/vault/data" - } - - service_registration "kubernetes" {} - # config is a raw string of default configuration when using a Stateful - # deployment. Default is to use a Consul for its HA storage backend. - # This should be HCL. - config: | - ui = true - - listener "tcp" { - tls_disable = 1 - address = "[::]:8200" - cluster_address = "[::]:8201" - } - storage "consul" { - path = "vault" - address = "HOST_IP:8500" - } - - service_registration "kubernetes" {} - - # Example configuration for using auto-unseal, using Google Cloud KMS. The - # GKMS keys must already exist, and the cluster must have a service account - # that is authorized to access GCP KMS. - #seal "gcpckms" { - # project = "vault-helm-dev-246514" - # region = "global" - # key_ring = "vault-helm-unseal-kr" - # crypto_key = "vault-helm-unseal-key" - #} - - # A disruption budget limits the number of pods of a replicated application - # that are down simultaneously from voluntary disruptions - disruptionBudget: - enabled: true - - # maxUnavailable will default to (n/2)-1 where n is the number of - # replicas. If you'd like a custom value, you can specify an override here. - maxUnavailable: null - - # Definition of the serviceAccount used to run Vault. - serviceAccount: - # Extra annotations for the serviceAccount definition. This can either be - # YAML or a YAML-formatted multi-line templated string map of the - # annotations to apply to the serviceAccount. - annotations: {} - -# Vault UI -ui: - # True if you want to create a Service entry for the Vault UI. 
-  #
-  # serviceType can be used to control the type of service created. For
-  # example, setting this to "LoadBalancer" will create an external load
-  # balancer (for supported K8S installations) to access the UI.
-  enabled: false
-  serviceType: "ClusterIP"
-  serviceNodePort: null
-  externalPort: 8200
-
-  # loadBalancerSourceRanges:
-  #   - 10.0.0.0/16
-  #   - 1.78.23.3/32
-
-  # loadBalancerIP:
-
-  # Extra annotations to attach to the ui service
-  # This can either be YAML or a YAML-formatted multi-line templated string map
-  # of the annotations to apply to the ui service
-  annotations: {}
diff --git a/examples/values-local.yaml b/examples/local/values-local.yaml
similarity index 100%
rename from examples/values-local.yaml
rename to examples/local/values-local.yaml

From 6fcd52b9c24b275c772d00f23627cc001c62930f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Andr=C3=A9=20Fernandes?=
Date: Wed, 21 Oct 2020 10:36:10 -0300
Subject: [PATCH 3/3] v 0.8.0

---
 charts/vkpr/CRDs.md                        |  8 ++
 charts/vkpr/acme.yaml                      | 39 +++++++++
 examples/local/acme.yaml                   | 41 ++++++++++
 examples/local/values-local-certs-dns.yaml | 94 ++++++++++++++++++++++
 examples/local/values-local-minimal.yaml   | 61 ++++++++++++++
 5 files changed, 243 insertions(+)
 create mode 100644 charts/vkpr/CRDs.md
 create mode 100644 charts/vkpr/acme.yaml
 create mode 100644 examples/local/acme.yaml
 create mode 100644 examples/local/values-local-certs-dns.yaml
 create mode 100644 examples/local/values-local-minimal.yaml

diff --git a/charts/vkpr/CRDs.md b/charts/vkpr/CRDs.md
new file mode 100644
index 00000000..fe94bfa2
--- /dev/null
+++ b/charts/vkpr/CRDs.md
@@ -0,0 +1,8 @@
+# Subchart CRDs
+
+These CRDs must be installed beforehand *or* automatically via the "crds" folder.
+In production we recommend *not* installing them automatically (use "--skip-crds").
+
+## cert-manager
+
+* https://github.com/jetstack/cert-manager/releases/download/v1.0.3/cert-manager.crds.yaml
diff --git a/charts/vkpr/acme.yaml b/charts/vkpr/acme.yaml
new file mode 100644
index 00000000..9dbfb663
--- /dev/null
+++ b/charts/vkpr/acme.yaml
@@ -0,0 +1,39 @@
+{{- if index .Values "cert-manager" "enabled" -}}
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-staging
+spec:
+  acme:
+    # You must replace this email address with your own.
+    # Let's Encrypt will use this to contact you about expiring
+    # certificates, and issues related to your account.
+    email: {{ .Values.acme.email }}
+    server: https://acme-staging-v02.api.letsencrypt.org/directory
+    privateKeySecretRef:
+      # Secret resource used to store the account's private key.
+      name: acme-staging-issuer-account-key
+  {{- with .Values.acme.solvers }}
+    solvers:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-production
+spec:
+  acme:
+    # You must replace this email address with your own.
+    # Let's Encrypt will use this to contact you about expiring
+    # certificates, and issues related to your account.
+    email: {{ .Values.acme.email }}
+    server: https://acme-v02.api.letsencrypt.org/directory
+    privateKeySecretRef:
+      # Secret resource used to store the account's private key.
+      name: acme-production-issuer-account-key
+  {{- with .Values.acme.solvers }}
+    solvers:
+    {{- toYaml . 
| nindent 4 }}
+  {{- end }}
+{{- end -}}
diff --git a/examples/local/acme.yaml b/examples/local/acme.yaml
new file mode 100644
index 00000000..a1820c82
--- /dev/null
+++ b/examples/local/acme.yaml
@@ -0,0 +1,41 @@
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-staging
+spec:
+  acme:
+    # You must replace this email address with your own.
+    # Let's Encrypt will use this to contact you about expiring
+    # certificates, and issues related to your account.
+    email: 577b76370d-dc8645@inbox.mailtrap.io
+    server: https://acme-staging-v02.api.letsencrypt.org/directory
+    privateKeySecretRef:
+      # Secret resource used to store the account's private key.
+      name: acme-staging-issuer-account-key
+    solvers:
+    - dns01:
+        digitalocean:
+          tokenSecretRef:
+            name: digitalocean-dns
+            key: access-token
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-production
+spec:
+  acme:
+    # You must replace this email address with your own.
+    # Let's Encrypt will use this to contact you about expiring
+    # certificates, and issues related to your account.
+    email: 577b76370d-dc8645@inbox.mailtrap.io
+    server: https://acme-v02.api.letsencrypt.org/directory
+    privateKeySecretRef:
+      # Secret resource used to store the account's private key.
+      name: acme-production-issuer-account-key
+    solvers:
+    - dns01:
+        digitalocean:
+          tokenSecretRef:
+            name: digitalocean-dns
+            key: access-token
diff --git a/examples/local/values-local-certs-dns.yaml b/examples/local/values-local-certs-dns.yaml
new file mode 100644
index 00000000..93632259
--- /dev/null
+++ b/examples/local/values-local-certs-dns.yaml
@@ -0,0 +1,94 @@
+#
+# Values for local testing with k3d, but with Cert-manager and External-DNS running
+# against DigitalOcean.
+#
+# What it runs:
+# - Ingress controller (ingress-nginx)
+# - "whoami" application with a real DNS name
+# - Cert-Manager
+# - External-DNS
+#
+# kubectl create secret generic digitalocean-dns --from-literal=access-token=
+# helm upgrade -i vkpr --skip-crds -f examples/local/values-local-certs-dns.yaml ./charts/vkpr \
+#   --set external-dns.digitalocean.apiToken=
+# kubectl apply -f examples/local/acme.yaml
+#
+# Test with:
+#
+# curl -k -H "Host: whoami.vkpr-dev.vertigo.com.br" https://
+# curl -k https://whoami.vkpr-dev.vertigo.com.br
+#

+#
+# INGRESS STACK
+#
+ingress-nginx:
+  enabled: true
+
+external-dns:
+  enabled: true
+  rbac:
+    create: true
+  sources:
+    - ingress
+  provider: digitalocean
+  # provide no value here, use --set in command line
+  # digitalocean:
+  #   apiToken:
+  interval: "1m"
+  logLevel: debug
+
+cert-manager:
+  enabled: true
+  installCRDs: true # ok for testing
+  ingressShim:
+    defaultIssuerName: letsencrypt-staging
+    defaultIssuerKind: ClusterIssuer
+    defaultIssuerGroup: cert-manager.io
+  prometheus:
+    enabled: false
+
+# data for the ACME CRD
+# acme:
+#   email: andre@vertigo.com.br
+#   solvers:
+#     - dns01:
+#         digitalocean:
+#           tokenSecretRef:
+#             name: digitalocean-dns
+#             key: access-token
+
+# chart values
+ingress:
+  enabled: true
+  annotations:
+    kubernetes.io/ingress.class: nginx
+    kubernetes.io/tls-acme: "true"
+  hosts:
+    - host: whoami.vkpr-dev.vertigo.com.br
+      paths: ["/"]
+  tls:
+    - hosts:
+        - whoami.vkpr-dev.vertigo.com.br
+      secretName: whoami-cert
+
+#
+# LOGGING STACK:
+#
+
+loki-stack:
+  enabled: false
+
+#
+# MONITORING STACK
+#
+kube-prometheus-stack:
+  enabled: false
+
+#
+# SECURITY STACK
+#
+vault:
+  enabled: false
+keycloak:
+  enabled: false
diff --git a/examples/local/values-local-minimal.yaml b/examples/local/values-local-minimal.yaml
new file mode 100644
index 00000000..c2f03fdc
--- /dev/null
+++ b/examples/local/values-local-minimal.yaml
@@ -0,0 +1,61 @@
+#
+# Values for local testing with k3d.
+#
+# What it runs:
+# - Ingress controller (ingress-nginx)
+# - "whoami" application
+#
+# helm upgrade -i vkpr -f examples/local/values-local-minimal.yaml ./charts/vkpr
+#
+# Add the following entry to /etc/hosts:
+# 127.0.0.1 whoami.localdomain
+#
+# Test with:
+#
+# curl whoami.localdomain:8080
+#

+#
+# INGRESS STACK
+#
+ingress-nginx:
+  enabled: true
+  # service:
+  #   type: NodePort
+  #   nodePorts:
+  #     http: 32080
+
+external-dns:
+  enabled: false
+cert-manager:
+  enabled: false
+
+# chart values
+ingress:
+  enabled: true
+  annotations:
+    ingress.kubernetes.io/ssl-redirect: "false"
+  hosts:
+    - host: whoami.localdomain
+      paths: ["/"]
+
+#
+# LOGGING STACK:
+#
+
+loki-stack:
+  enabled: false
+
+#
+# MONITORING STACK
+#
+kube-prometheus-stack:
+  enabled: false
+
+#
+# SECURITY STACK
+#
+vault:
+  enabled: false
+keycloak:
+  enabled: false
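
The values-local-certs-dns.yaml example leaves certificate issuance to cert-manager's ingress-shim, so the quickest way to see whether it worked is to inspect the ClusterIssuers and the Certificate behind the `whoami-cert` TLS secret. Below is a minimal verification sketch; it assumes the release runs in the `default` namespace, that ingress-shim names the Certificate after the TLS secret (`whoami-cert`), and that the cert-manager controller pod carries the `app=cert-manager` label — all of these are assumptions to adjust to your setup.

```sh
# ClusterIssuers from examples/local/acme.yaml should report READY=True
kubectl get clusterissuers

# ingress-shim requests a Certificate for the whoami Ingress; watch it become Ready
kubectl get certificates
kubectl describe certificate whoami-cert   # name assumed from the Ingress tls.secretName

# Once issuance succeeds, the TLS secret referenced by the Ingress exists
kubectl get secret whoami-cert

# If the DNS-01 challenge against DigitalOcean stalls, the controller logs usually say why
# (label selector may vary with the cert-manager chart version)
kubectl logs -l app=cert-manager --tail=100
```

When the staging issuer works end to end, one way to move to real certificates is to point the issuance at `letsencrypt-production` instead, for example by changing `ingressShim.defaultIssuerName` in the values file.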