Revert "Use pod labels to workaround service selector issue"
This reverts commit f72d0ba.
sd109 committed Aug 9, 2024
1 parent f72d0ba commit 63a5183
Showing 2 changed files with 105 additions and 6 deletions.
danswer-azimuth/templates/hooks.yml (104 additions, 0 deletions)
@@ -0,0 +1,104 @@
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+  name: {{ .Release.Name }}-danswer-cleanup
+  namespace: {{ .Release.Name }}
+  annotations:
+    # Need to keep around for post-delete hooks
+    helm.sh/resource-policy: keep
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - persistentvolumeclaims
+    verbs:
+      - get
+      - list
+      - delete
+  - apiGroups:
+      - apps
+    resources:
+      - statefulsets
+    verbs:
+      - get
+      - patch
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+  name: {{ .Release.Name }}-danswer-cleanup
+  namespace: {{ .Release.Name }}
+  annotations:
+    # Need to keep around for post-delete hooks
+    helm.sh/resource-policy: keep
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: Role
+  name: {{ .Release.Name }}-danswer-cleanup
+subjects:
+  - kind: ServiceAccount
+    name: default
+    namespace: {{ .Release.Name }}
+---
+# Delete stateful set PVCs since upstream Helm chart doesn't expose this config option
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: pvc-cleanup
+  namespace: {{ .Release.Name }}
+  annotations:
+    helm.sh/hook: post-delete
+    helm.sh/hook-weight: "0"
+    helm.sh/hook-delete-policy: hook-succeeded
+spec:
+  template:
+    spec:
+      containers:
+        - name: pvc-deleter
+          image: gcr.io/google_containers/hyperkube:v1.18.0
+          command:
+            - kubectl
+            - delete
+            - -n
+            - {{ .Release.Name }}
+            - pvc
+            - --all
+      restartPolicy: Never
+      # TODO: Is there a better service account to use with profile delete permissions?
+      # It seems that kubeflow-namespaced SAs can't reach the kube-api-server and
+      # other SAs in default/kube-system NS can't delete profile resources
+      serviceAccountName: default
+---
+# Until https://github.com/unoplat/vespa-helm-charts/pull/23
+# is merged, we need to patch vespa stateful set after deployment
+# so that service label selectors match correctly.
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: sts-label-updater
+  namespace: {{ .Release.Name }}
+  annotations:
+    helm.sh/hook: post-install
+    helm.sh/hook-weight: "0"
+    helm.sh/hook-delete-policy: hook-succeeded
+spec:
+  template:
+    spec:
+      containers:
+        - name: label-updater
+          image: gcr.io/google_containers/hyperkube:v1.18.0
+          command:
+            - kubectl
+            - patch
+            - -n
+            - {{ .Release.Name }}
+            - sts
+            - vespa
+            - -p
+            - {{ printf "{'spec':{'template':{'metadata':{'labels':{'app.kubernetes.io/instance':'%s'}}}}}" .Release.Name | replace "'" "\"" | squote }}
+      restartPolicy: Never
+      # TODO: Is there a better service account to use with profile delete permissions?
+      # It seems that kubeflow-namespaced SAs can't reach the kube-api-server and
+      # other SAs in default/kube-system NS can't delete profile resources
+      serviceAccountName: default
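
Editor's note (not part of the commit): the pvc-cleanup hook boils down to deleting every PVC in the release namespace after the release is gone. A minimal sketch of what the templated container command renders to, assuming a hypothetical release named my-danswer (the name is chosen purely for illustration):

    # Sketch: rendered pvc-cleanup command for a hypothetical
    # release named "my-danswer"
    command:
      - kubectl
      - delete
      - -n
      - my-danswer   # the chart installs into a namespace named after the release
      - pvc
      - --all        # removes every PVC in the namespace, not only vespa's

The helm.sh/resource-policy: keep annotation on the Role and RoleBinding matters here: without it, Helm would delete the RBAC objects during uninstall before the post-delete Job had a chance to use them.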
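
The -p argument of the sts-label-updater Job also deserves a note: the template writes the JSON patch with single quotes (so it can sit inside a double-quoted printf format string), replace swaps them for double quotes, and squote wraps the result in single quotes so the rendered YAML remains a single scalar. Assuming the same hypothetical release name, the last two items of the command list come out roughly as:

    # Sketch: rendered patch argument for hypothetical release "my-danswer"
    - -p
    - '{"spec":{"template":{"metadata":{"labels":{"app.kubernetes.io/instance":"my-danswer"}}}}}'

Patching the StatefulSet's pod template should trigger a rolling update of the vespa pods, after which they carry the instance label the Service selector expects.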
danswer-azimuth/values.yaml (1 addition, 6 deletions)
@@ -15,12 +15,6 @@ danswer-stack:
     service:
       type: ClusterIP
   vespa:
-    # Workaround https://github.com/unoplat/vespa-helm-charts/issues/20
-    # until https://github.com/unoplat/vespa-helm-charts/pull/23 is
-    # merged.
-    podLabels:
-      app: vespa
-      app.kubernetes.io/instance: danswer
     service:
       type: ClusterIP
     volumeClaimTemplates:
@@ -32,3 +26,4 @@ danswer-stack:
           resources:
             requests:
               storage: *vespa-capacity
+
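For context on what was reverted (illustrative, not taken from the chart source): a Kubernetes Service only routes traffic to pods whose labels match its spec.selector, and the issue linked above describes the vespa subchart's Service selecting on an instance label the pods did not carry. The removed podLabels block stamped the missing labels directly onto the pods; this revert drops that approach in favour of the post-install patch Job restored above. A sketch of the matching rule, with an assumed selector:

    # Illustrative Service stub: endpoints are only populated for pods
    # whose labels satisfy every key/value pair in the selector.
    apiVersion: v1
    kind: Service
    metadata:
      name: vespa
    spec:
      selector:
        app: vespa                            # assumed selector keys, shown only
        app.kubernetes.io/instance: danswer   # to illustrate the label mismatch
      ports:
        - port: 8080                          # placeholder port, not from the chart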
