diff --git a/kubevirt/examples/disks/debian12-cloud-rwo.yaml b/kubevirt/examples/disks/debian12-cloud-rwo.yaml
index 170f65581..5c39c777c 100644
--- a/kubevirt/examples/disks/debian12-cloud-rwo.yaml
+++ b/kubevirt/examples/disks/debian12-cloud-rwo.yaml
@@ -9,7 +9,7 @@ metadata:
     cdi.kubevirt.io/storage.bind.immediate.requested: "true"
     cdi.kubevirt.io/storage.import.endpoint: "https://cloud.debian.org/images/cloud/bookworm/daily/latest/debian-12-generic-amd64-daily.qcow2"
 spec:
-  storageClassName: local-path
+  storageClassName: seaweedfs-storage
   accessModes:
     - ReadWriteOnce
   resources:
diff --git a/kubevirt/examples/disks/deian12-iso-rwo.yaml b/kubevirt/examples/disks/deian12-iso-rwo.yaml
index c1073607a..07c9dd8b1 100644
--- a/kubevirt/examples/disks/deian12-iso-rwo.yaml
+++ b/kubevirt/examples/disks/deian12-iso-rwo.yaml
@@ -9,7 +9,7 @@ metadata:
     cdi.kubevirt.io/storage.bind.immediate.requested: "true"
     cdi.kubevirt.io/storage.import.endpoint: "https://cdimage.debian.org/debian-cd/current/amd64/iso-dvd/debian-12.6.0-amd64-DVD-1.iso"
 spec:
-  storageClassName: local-path
+  storageClassName: seaweedfs-storage
   accessModes:
     - ReadWriteOnce
   resources:
diff --git a/kubevirt/examples/disks/jammy-cloud-rwo.yaml b/kubevirt/examples/disks/jammy-cloud-rwo.yaml
index ec83dd005..6266ed4c5 100644
--- a/kubevirt/examples/disks/jammy-cloud-rwo.yaml
+++ b/kubevirt/examples/disks/jammy-cloud-rwo.yaml
@@ -9,7 +9,7 @@ metadata:
     cdi.kubevirt.io/storage.bind.immediate.requested: "true"
     cdi.kubevirt.io/storage.import.endpoint: "https://cloud-images.ubuntu.com/jammy/current/jammy-server-cloudimg-amd64.img"
 spec:
-  storageClassName: local-path
+  storageClassName: seaweedfs-storage
   accessModes:
     - ReadWriteOnce
   resources:
diff --git a/kubevirt/examples/disks/lunar-cloud-rwo.yaml b/kubevirt/examples/disks/lunar-cloud-rwo.yaml
index 0f1293022..6242605b4 100644
--- a/kubevirt/examples/disks/lunar-cloud-rwo.yaml
+++ b/kubevirt/examples/disks/lunar-cloud-rwo.yaml
@@ -9,7 +9,7 @@ metadata:
     cdi.kubevirt.io/storage.bind.immediate.requested: "true"
     cdi.kubevirt.io/storage.import.endpoint: "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
 spec:
-  storageClassName: local-path
+  storageClassName: seaweedfs-storage
   accessModes:
     - ReadWriteOnce
   resources:
diff --git a/kubevirt/examples/disks/virtio-drivers.yaml b/kubevirt/examples/disks/virtio-drivers.yaml
index 8aeb17bb4..1e8a3d812 100644
--- a/kubevirt/examples/disks/virtio-drivers.yaml
+++ b/kubevirt/examples/disks/virtio-drivers.yaml
@@ -9,7 +9,7 @@ metadata:
     cdi.kubevirt.io/storage.bind.immediate.requested: "true"
     cdi.kubevirt.io/storage.import.endpoint: "https://fedorapeople.org/groups/virt/virtio-win/direct-downloads/archive-virtio/virtio-win-0.1.240-1/virtio-win-0.1.240.iso"
 spec:
-  storageClassName: local-path
+  storageClassName: seaweedfs-storage
   accessModes:
     - ReadWriteOnce
   resources:
diff --git a/kubevirt/examples/disks/windows-10-ro.yaml b/kubevirt/examples/disks/windows-10-ro.yaml
index f5d450288..a986677fc 100644
--- a/kubevirt/examples/disks/windows-10-ro.yaml
+++ b/kubevirt/examples/disks/windows-10-ro.yaml
@@ -9,7 +9,7 @@ metadata:
     cdi.kubevirt.io/storage.bind.immediate.requested: "true"
     cdi.kubevirt.io/storage.import.endpoint: "https://f004.backblazeb2.com/file/buildstar-public-share/windows10.iso"
 spec:
-  storageClassName: local-path
+  storageClassName: seaweedfs-storage
   accessModes:
     - ReadWriteOnce
   resources:
diff --git a/kubevirt/examples/disks/windows-autoconfig.yaml b/kubevirt/examples/disks/windows-autoconfig.yaml
index 10528b968..7534556cc 100644
--- a/kubevirt/examples/disks/windows-autoconfig.yaml
+++ b/kubevirt/examples/disks/windows-autoconfig.yaml
@@ -9,7 +9,7 @@ metadata:
     cdi.kubevirt.io/storage.bind.immediate.requested: "true"
     cdi.kubevirt.io/storage.import.endpoint: "https://f004.backblazeb2.com/file/buildstar-public-share/config.iso"
 spec:
-  storageClassName: local-path
+  storageClassName: seaweedfs-storage
   accessModes:
     - ReadWriteOnce
   resources:
diff --git a/kubevirt/kubevirt-cdi-argo-app.yaml b/kubevirt/kubevirt-cdi-argo-app.yaml
index f64fe8ef5..95b18ec2f 100644
--- a/kubevirt/kubevirt-cdi-argo-app.yaml
+++ b/kubevirt/kubevirt-cdi-argo-app.yaml
@@ -1,6 +1,6 @@
-# KubeVirt extends Kubernetes by adding additional virtualization resource types 
-# (especially the VM type) through Kubernetes's Custom Resource Definitions API. 
-# By using this mechanism, the Kubernetes API can be used to manage these VM resources 
+# KubeVirt extends Kubernetes by adding additional virtualization resource types
+# (especially the VM type) through Kubernetes's Custom Resource Definitions API.
+# By using this mechanism, the Kubernetes API can be used to manage these VM resources
 # alongside all other resources Kubernetes provides.
 # https://github.com/kubevirt/kubevirt
 ---
@@ -9,6 +9,8 @@ kind: Application
 metadata:
   name: kubevirt-cdi
   namespace: argocd
+  annotations:
+    argocd.argoproj.io/sync-wave: "4"
 spec:
   destination:
     name: ''
@@ -19,20 +21,20 @@ spec:
     repoURL: 'https://cloudymax.github.io/kubevirt-community-stack/'
     targetRevision: 0.1.5
     chart: kubevirt-cdi
-    helm: 
+    helm:
       values: |
         replicaCount: 1
- 
+
         image:
           repository: quay.io/kubevirt/cdi-operator
           pullPolicy: IfNotPresent
           tag: ""
- 
+
         serviceAccount:
           create: true
           annotations: {}
           name: "kubevirt-cdi-service-account"
- 
+
         securityContext:
           capabilities:
             drop:
@@ -41,36 +43,36 @@
           allowPrivilegeEscalation: false
           seccompProfile:
             type: RuntimeDefault
- 
+
         service:
           type: ClusterIP
           port: 8080
           name: metrics
           protocol: TCP
- 
+
         uploadProxy:
           type: ClusterIP
           port: 443
           targetPort: 8443
           protocol: TCP
- 
+
         ingress:
           enabled: false
- 
+
         resources:
           requests:
             cpu: 10m
             memory: 150Mi
- 
+
         nodeSelector:
           kubernetes.io/os: linux
- 
+
         tolerations:
           - key: CriticalAddonsOnly
             operator: Exists
- 
+
         affinity: {}
- 
+
         cdi:
           featureGates:
             - HonorWaitForFirstConsumer
diff --git a/kubevirt/kubevirt-operator-argo-app.yaml b/kubevirt/kubevirt-operator-argo-app.yaml
index 2136a6222..6b36ff539 100644
--- a/kubevirt/kubevirt-operator-argo-app.yaml
+++ b/kubevirt/kubevirt-operator-argo-app.yaml
@@ -1,6 +1,6 @@
-# KubeVirt extends Kubernetes by adding additional virtualization resource types 
-# (especially the VM type) through Kubernetes's Custom Resource Definitions API. 
-# By using this mechanism, the Kubernetes API can be used to manage these VM resources 
+# KubeVirt extends Kubernetes by adding additional virtualization resource types
+# (especially the VM type) through Kubernetes's Custom Resource Definitions API.
+# By using this mechanism, the Kubernetes API can be used to manage these VM resources
 # alongside all other resources Kubernetes provides.
 # https://github.com/kubevirt/kubevirt
 ---
@@ -9,6 +9,8 @@ kind: Application
 metadata:
   name: kubevirt-operator
   namespace: argocd
+  annotations:
+    argocd.argoproj.io/sync-wave: "4"
 spec:
   project: kubevirt
   destination:
@@ -19,43 +21,43 @@ spec:
     targetRevision: 0.2.7
     chart: kubevirt
     helm:
-      values: | 
+      values: |
         replicaCount: 1
- 
+
         image:
           repository: quay.io/kubevirt/virt-operator
           pullPolicy: IfNotPresent
           tag: "v1.2.0"
- 
+
         imagePullSecrets: []
         nameOverride: ""
        fullnameOverride: ""
- 
+
         serviceAccount:
           create: true
           annotations: {}
           name: "kubevirt-operator"
- 
+
         podAnnotations: {}
- 
+
         podSecurityContext: {}
- 
+
         securityContext:
           privileged: true
- 
+
         resources: {}
- 
+
         nodeSelector: {}
         tolerations:
           - key: CriticalAddonsOnly
             operator: Exists
- 
+
         monitorNamespace: "prometheus"
-        prometheus: 
+        prometheus:
           enabled: true
           serviceName: "prom-stack-kube-prometheus-prometheus"
           serviceNamesapce: "prometheus"
- 
+
         affinity:
           podAntiAffinity:
             preferredDuringSchedulingIgnoredDuringExecution:
@@ -68,11 +70,11 @@
                       - virt-operator
                   topologyKey: kubernetes.io/hostname
                 weight: 1
- 
+
         priorityclass:
           create: true
           value: 1000000000
- 
+
         permittedHostDevices:
           pciHostDevices:
             - pciVendorSelector: "10de:2782"
@@ -84,12 +86,12 @@
           mediatedDevices:
             - mdevNameSelector: "GRID M60-2Q"
               resourceName: "nvidia.com/GRID_M60-2Q"
- 
+
         mediatedDevicesTypes:
           - nvidia-18
- 
+
         useEmulation: false
- 
+
         featureGates:
           - ExpandDisks
           - CPUManager
diff --git a/kubevirt/s3_csi_driver.yaml b/kubevirt/s3_csi_driver.yaml
new file mode 100644
index 000000000..cf94a6055
--- /dev/null
+++ b/kubevirt/s3_csi_driver.yaml
@@ -0,0 +1,140 @@
+---
+apiVersion: argoproj.io/v1alpha1
+kind: Application
+metadata:
+  name: kubevirt-swfs-csi-driver
+  namespace: kubevirt
+  annotations:
+    argocd.argoproj.io/sync-wave: "3"
+spec:
+  destination:
+    name: ''
+    namespace: kubevirt
+    server: 'https://kubernetes.default.svc'
+  source:
+    path: ''
+    repoURL: 'https://cloudymax.github.io/kubevirt-community-stack/'
+    targetRevision: 0.1.5
+    chart: kubevirt-cdi
+    helm:
+      values: |
+        seaweedfsFiler: "http://seaweedfs-volume:8888"
+        storageClassName: seaweedfs-storage
+        isDefaultStorageClass: false
+        tlsSecret: ""
+        logVerbosity: 4
+        cacheCapacityMB: 0
+
+        #concurrentWriters: 32
+
+        imagePullPolicy: "IfNotPresent"
+
+        #imagePullSecrets:
+        #- name: mycredentials
+
+        csiProvisioner:
+          image: registry.k8s.io/sig-storage/csi-provisioner:v3.5.0
+          resources: {}
+          livenessProbe:
+            failureThreshold:
+            initialDelaySeconds: 10
+            timeoutSeconds: 3
+            periodSeconds: 60
+
+        csiResizer:
+          image: registry.k8s.io/sig-storage/csi-resizer:v1.8.0
+          resources: {}
+          livenessProbe:
+            failureThreshold:
+            initialDelaySeconds: 10
+            timeoutSeconds: 3
+            periodSeconds: 60
+
+        csiAttacher:
+          # generally we don't need the attacher, because we do nothing to attach volumes to nodes;
+          # we keep it around for historical reasons
+          # you need to delete the seaweedfs CSIDriver CRD manually before upgrading with the attacher disabled
+          # you also need to delete all seaweedfs VolumeAttachment CRDs manually after the upgrade
+          enabled: true
+          image: registry.k8s.io/sig-storage/csi-attacher:v4.3.0
+          resources: {}
+          livenessProbe:
+            failureThreshold:
+            initialDelaySeconds: 10
+            timeoutSeconds: 3
+            periodSeconds: 60
+
+        csiSnapshotter:
+          # we do not support snapshots yet
+          enabled: false
+
+        csiNodeDriverRegistrar:
+          image: registry.k8s.io/sig-storage/csi-node-driver-registrar:v2.8.0
+          resources: {}
+          livenessProbe:
+            failureThreshold:
+            initialDelaySeconds: 10
+            timeoutSeconds: 3
+            periodSeconds: 60
+
+        csiLivenessProbe:
+          image: registry.k8s.io/sig-storage/livenessprobe:v2.10.0
+          resources: {}
+
+        seaweedfsCsiPlugin:
+          image: chrislusf/seaweedfs-csi-driver:v1.2.5
+          securityContext:
+            privileged: true
+            capabilities:
+              add: ["SYS_ADMIN"]
+            allowPrivilegeEscalation: true
+
+        driverName: seaweedfs-csi-driver
+
+        controller:
+          replicas: 1
+          affinity: {}
+          tolerations:
+          resources: {}
+          livenessProbe:
+            failureThreshold:
+            initialDelaySeconds: 10
+            timeoutSeconds: 3
+            periodSeconds: 60
+
+        # DataLocality (inspired by Longhorn) allows instructing the storage driver which volume locations will be used or preferred when Pods read & write.
+        # e.g. it allows Pods to write preferably to the volume servers in their local dataCenter
+        # Requires volume servers to be correctly labelled and matching topology info to be passed into the seaweedfs-csi-driver node
+        # Example value: "write_preferlocaldc"
+        dataLocality: "none"
+
+        node:
+          # Deploy the node daemonset
+          enabled: true
+          # When the seaweedfs-csi-driver-node pod on a node is recreated, all pods on that node using a seaweed-csi PV will stop working.
+          # For a safe update, set updateStrategy.type: OnDelete and manually move the pods that use a seaweed-csi PV, then delete the seaweedfs-csi-driver-node daemonset pod
+          updateStrategy:
+            type: RollingUpdate
+            rollingUpdate:
+              maxUnavailable: 25%
+          affinity: {}
+          tolerations:
+          livenessProbe:
+            failureThreshold:
+            initialDelaySeconds: 10
+            timeoutSeconds: 3
+            periodSeconds: 60
+
+        # Auto-inject topology info from Kubernetes node labels using KubeMod (https://github.com/kubemod/kubemod)
+        # Necessary because the DownwardAPI doesn't support passing node labels (see: https://github.com/kubernetes/kubernetes/issues/40610)
+        # Requires KubeMod to be installed
+        injectTopologyInfoFromNodeLabel:
+          enabled: false
+          labels:
+            dataCenter: "topology.kubernetes.io/zone"
+
+        ## Change if not using standard kubernetes deployments, like k0s
+        volumes:
+          registration_dir: /var/lib/kubelet/plugins_registry
+          plugins_dir: /var/lib/kubelet/plugins
+          pods_mount_dir: /var/lib/kubelet/pods
diff --git a/kubevirt/s3_provider_appset.yaml b/kubevirt/s3_provider_appset.yaml
new file mode 100644
index 000000000..8f9e46c65
--- /dev/null
+++ b/kubevirt/s3_provider_appset.yaml
@@ -0,0 +1,39 @@
+---
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: kubevirt-s3-provider-app-set
+  namespace: argocd
+  annotations:
+    argocd.argoproj.io/sync-wave: "2"
+spec:
+  goTemplate: true
+  # generator allows us to source specific values from an external k8s secret
+  generators:
+    - plugin:
+        configMapRef:
+          name: secret-var-plugin-generator
+        input:
+          parameters:
+            secret_vars:
+              - kubevirt_s3_provider
+  template:
+    metadata:
+      name: kubevirt-{{ .kubevirt_s3_provider }}
+      annotations:
+        argocd.argoproj.io/sync-wave: "2"
+    spec:
+      project: kubevirt
+      destination:
+        server: "https://kubernetes.default.svc"
+        namespace: kubevirt
+      syncPolicy:
+        syncOptions:
+          - ApplyOutOfSyncOnly=true
+        automated:
+          prune: true
+          selfHeal: true
+      source:
+        repoURL: https://github.com/small-hack/argocd-apps.git
+        path: kubevirt/storage/{{ .kubevirt_s3_provider }}/
+        targetRevision: main
diff --git a/kubevirt/s3_pvc_appset.yaml b/kubevirt/s3_pvc_appset.yaml
new file mode 100644
index 000000000..41dde9243
--- /dev/null
+++ b/kubevirt/s3_pvc_appset.yaml
@@ -0,0 +1,89 @@
+---
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: kubevirt-s3-pvc-app-set
+  namespace: argocd
+  annotations:
+    argocd.argoproj.io/sync-wave: "1"
+spec:
+  goTemplate: true
+  # generator allows us to source specific values from an external k8s secret
+  generators:
+    - plugin:
+        configMapRef:
+          name: secret-var-plugin-generator
+        input:
+          parameters:
+            secret_vars:
+              - kubevirt_s3_provider
+              - kubevirt_s3_backup_endpoint
+              - kubevirt_s3_backup_bucket
+              - kubevirt_s3_backup_region
+              - kubevirt_s3_pvc_capacity
+              - kubevirt_pvc_backup_schedule
+              - global_pvc_storage_class
+  template:
+    metadata:
+      name: kubevirt-s3-pvc
+      annotations:
+        argocd.argoproj.io/sync-wave: "1"
+    spec:
+      project: kubevirt
+      destination:
+        server: "https://kubernetes.default.svc"
+        namespace: kubevirt
+      syncPolicy:
+        syncOptions:
+          - ApplyOutOfSyncOnly=true
+        automated:
+          prune: true
+          selfHeal: true
+      source:
+        repoURL: https://github.com/small-hack/argocd-apps.git
+        path: s3_persistence_and_backups/
+        targetRevision: main
+        helm:
+          valuesObject:
+            provider: '{{ .kubevirt_s3_provider }}'
+            pvc_capacity: '{{ .kubevirt_s3_pvc_capacity }}'
+            pvc_storageClassName: '{{ .global_pvc_storage_class }}'
+
+            seaweedfs:
+              # -- deploy a master data PVC for seaweedfs
+              master_pvc: false
+
+            # not in use yet
+            k8up:
+              backup_name: "kubevirt-nightly-backup"
+              # -- user to run the backups as
+              securityContext:
+                runAsUser: 0
+
+            backup_name: 'kubevirt-nightly-backup'
+
+            # -- can be set to 's3' or 'local'
+            backup_type: 's3'
+
+            s3:
+              bucket: '{{ .kubevirt_s3_backup_bucket }}'
+              endpoint: '{{ .kubevirt_s3_backup_endpoint }}'
+              accessKeyIDSecretRef:
+                name: 's3-backups-credentials'
+                key: 'accessKeyId'
+                optional: false
+              secretAccessKeySecretRef:
+                name: 's3-backups-credentials'
+                key: 'secretAccessKey'
+                optional: false
+
+            repoPasswordSecretRef:
+              name: 's3-backups-credentials'
+              key: 'resticRepoPassword'
+
+            prometheus_url: 'push-gateway.prometheus.svc:9091'
+
+            schedules:
+              backup: '{{ .kubevirt_pvc_backup_schedule }}'
+              check: '0 0 * * *'
+              prune: '0 0 * * *'
diff --git a/kubevirt/storage/seaweedfs/seaweedfs_argocd_appset.yaml b/kubevirt/storage/seaweedfs/seaweedfs_argocd_appset.yaml
new file mode 100644
index 000000000..048a3aa1a
--- /dev/null
+++ b/kubevirt/storage/seaweedfs/seaweedfs_argocd_appset.yaml
@@ -0,0 +1,211 @@
+---
+# seaweedfs is deployed after the secrets and persistent volumes are up
+apiVersion: argoproj.io/v1alpha1
+kind: ApplicationSet
+metadata:
+  name: kubevirt-seaweedfs-appset
+  namespace: argocd
+  annotations:
+    argocd.argoproj.io/sync-wave: "1"
+spec:
+  goTemplate: true
+  # generator allows us to source specific values from an external k8s secret
+  generators:
+    - plugin:
+        configMapRef:
+          name: secret-var-plugin-generator
+        input:
+          parameters:
+            secret_vars:
+              - global_cluster_issuer
+              - kubevirt_s3_region
+              - kubevirt_s3_endpoint
+  template:
+    metadata:
+      name: kubevirt-seaweedfs-app
+      annotations:
+        argocd.argoproj.io/sync-wave: "2"
+    spec:
+      project: kubevirt
+      destination:
+        server: https://kubernetes.default.svc
+        namespace: kubevirt
+      syncPolicy:
+        syncOptions:
+          - ApplyOutOfSyncOnly=true
+        automated:
+          prune: true
+          selfHeal: true
+      source:
+        repoURL: 'https://seaweedfs.github.io/seaweedfs/helm'
+        targetRevision: 3.68.0
+        chart: seaweedfs
+        helm:
+          releaseName: kubevirt-seaweedfs
+          valuesObject:
+            global:
+              createClusterRole: true
+              imageName: chrislusf/seaweedfs
+              imagePullPolicy: IfNotPresent
+              enableSecurity: false
+              securityConfig:
+                jwtSigning:
+                  volumeWrite: true
+                  volumeRead: false
+                  filerWrite: false
+                  filerRead: false
+              serviceAccountName: "kubevirt-seaweedfs"
+              certificates:
+                alphacrds: false
+              monitoring:
+                enabled: false
+                gatewayHost: null
+                gatewayPort: null
+              enableReplication: false
+              replicationPlacment: "001"
+              extraEnvironmentVars:
+                WEED_CLUSTER_DEFAULT: "sw"
+                WEED_CLUSTER_SW_MASTER: "seaweedfs-master.seaweedfs:9333"
+                WEED_CLUSTER_SW_FILER: "seaweedfs-filer-client.seaweedfs:8888"
+            image:
+              registry: ""
+              repository: ""
+            master:
+              enabled: true
+              replicas: 1
+              port: 9333
+              grpcPort: 19333
+              metricsPort: 9327
+              ipBind: "0.0.0.0"
+              loggingOverrideLevel: null
+              pulseSeconds: null
+              garbageThreshold: null
+              metricsIntervalSec: 15
+              defaultReplication: "000"
+              disableHttp: false
+              config: |-
+                # Enter any extra configuration for master.toml here.
+                # It may be a multi-line string.
+              data:
+                type: "emptyDir"
+                # claimName: "swfs-master-data"
+              logs:
+                type: "hostPath"
+                size: ""
+                storageClass: ""
+                hostPathPrefix: /storage
+              livenessProbe:
+                enabled: true
+                httpGet:
+                  path: /cluster/status
+                  scheme: HTTP
+                initialDelaySeconds: 10
+                periodSeconds: 2
+                successThreshold: 1
+                failureThreshold: 100
+                timeoutSeconds: 10
+              readinessProbe:
+                enabled: true
+                httpGet:
+                  path: /cluster/status
+                  scheme: HTTP
+                initialDelaySeconds: 10
+                periodSeconds: 2
+                successThreshold: 2
+                failureThreshold: 100
+                timeoutSeconds: 10
+            volume:
+              enabled: true
+              port: 8080
+              grpcPort: 18080
+              metricsPort: 9327
+              ipBind: "0.0.0.0"
+              replicas: 1
+              loggingOverrideLevel: null
+              fileSizeLimitMB: null
+              minFreeSpacePercent: 7
+              dataDirs:
+                - name: data
+                  type: "existingClaim"
+                  claimName: "swfs-volume-data"
+                  maxVolumes: 0
+              livenessProbe:
+                enabled: true
+                httpGet:
+                  path: /status
+                  scheme: HTTP
+                initialDelaySeconds: 10
+                periodSeconds: 2
+                successThreshold: 1
+                failureThreshold: 100
+                timeoutSeconds: 10
+              readinessProbe:
+                enabled: true
+                httpGet:
+                  path: /status
+                  scheme: HTTP
+                initialDelaySeconds: 10
+                periodSeconds: 2
+                successThreshold: 2
+                failureThreshold: 100
+                timeoutSeconds: 10
+            filer:
+              enabled: true
+              replicas: 1
+              port: 8888
+              grpcPort: 18888
+              metricsPort: 9327
+              encryptVolumeData: true
+              enablePVC: true
+              storage: 10Gi
+              data:
+                type: "existingClaim"
+                claimName: "swfs-filer-data"
+              s3:
+                enabled: true
+                port: 8333
+                httpsPort: 0
+                allowEmptyFolder: false
+                domainName: '{{ .kubevirt_s3_endpoint }}'
+                enableAuth: true
+                existingConfigSecret: seaweedfs-s3-secret
+                createBuckets:
+                  - name: kubevirt
+                    anonymousRead: false
+                  - name: kubevirt-postgres
+                    anonymousRead: false
+              livenessProbe:
+                enabled: true
+                httpGet:
+                  path: /
+                  scheme: HTTP
+                initialDelaySeconds: 10
+                periodSeconds: 2
+                successThreshold: 1
+                failureThreshold: 100
+                timeoutSeconds: 10
+              readinessProbe:
+                enabled: true
+                httpGet:
+                  path: /
+                  scheme: HTTP
+                initialDelaySeconds: 10
+                periodSeconds: 2
+                successThreshold: 2
+                failureThreshold: 100
+                timeoutSeconds: 10
+            s3:
+              enabled: false
+            ingress:
+              enabled: true
+              className: "nginx"
+              # host: false for "*" hostname
+              host: '{{ .kubevirt_s3_endpoint }}'
+              # additional ingress annotations for the s3 endpoint
+              annotations:
+                nginx.ingress.kubernetes.io/proxy-body-size: 1G
+                cert-manager.io/cluster-issuer: '{{ .global_cluster_issuer }}'
+              tls:
+                - secretName: kubevirt-seaweedfs-tls
+                  hosts:
+                    - '{{ .kubevirt_s3_endpoint }}'
diff --git a/zitadel/app_of_apps/zitadel_argocd_appset.yaml b/zitadel/app_of_apps/zitadel_argocd_appset.yaml
index a0ea21b54..19db36ce9 100644
--- a/zitadel/app_of_apps/zitadel_argocd_appset.yaml
+++ b/zitadel/app_of_apps/zitadel_argocd_appset.yaml
@@ -51,44 +51,6 @@ spec:
             - name: TZ
               value: '{{ .global_time_zone }}'
 
-            - name: ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_HOST
-              valueFrom:
-                secretKeyRef:
-                  name: zitadel-smtp-credentials
-                  key: host
-
-            - name: ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_USER
-              valueFrom:
-                secretKeyRef:
-                  name: zitadel-smtp-credentials
-                  key: user
-
-            - name: ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_SMTP_PASSWORD
-              valueFrom:
-                secretKeyRef:
-                  name: zitadel-smtp-credentials
-                  key: password
-
-            - name: ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_TLS
-              value: 'true'
-
-            - name: ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROM
-              valueFrom:
-                secretKeyRef:
-                  name: zitadel-smtp-credentials
-                  key: from_address
-
-            - name: ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_FROMNAME
-              valueFrom:
-                secretKeyRef:
-                  name: zitadel-smtp-credentials
-                  key: from_name
-
-            - name: ZITADEL_DEFAULTINSTANCE_SMTPCONFIGURATION_REPLYTOADDRESS
-              valueFrom:
-                secretKeyRef:
-                  name: zitadel-smtp-credentials
-                  key: reply_to_address
 
             - name: ZITADEL_LOG_LEVEL
               value: info