# Makefile
KubeVersion = 1.30
NextKubeVersion = 1.31
RenderChart = echo "> Rendering $(1)..." && yq '.spec.values' $(1) | helm template --kube-version=$(KubeVersion) -f - $(shell yq '.metadata.name' $(1)) --repo $(shell yq '.spec.chart.repository' $(1)) $(shell yq '.spec.chart.name' $(1)) > /dev/null && echo ">> OK"
DryRunApply = echo "> Dry-run applying $(1)..." && kubectl apply -f $(1) --validate=true --dry-run=client > /dev/null && echo ">> OK"
DryRunApplyChart = echo "> Dry-run applying $(1)..." && yq '.spec.values' $(1) | helm template --kube-version=$(KubeVersion) -f - $(shell yq '.metadata.name' $(1)) --repo $(shell yq '.spec.chart.repository' $(1)) $(shell yq '.spec.chart.name' $(1)) | kubectl apply -f - --validate=true --dry-run=client > /dev/null && echo ">> OK"
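
# A minimal sketch (not part of the original Makefile) of how the macros above
# are meant to be invoked with $(call ...); the manifest path is illustrative.
# Kept commented out so it does not become the default goal:
#
# render-example:
# 	$(call RenderChart,deployments/example-helmrelease.yaml)
# 	$(call DryRunApplyChart,deployments/example-helmrelease.yaml)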
# targets operating on local files
validate:
	scripts/validate.sh

test: validate
# targets for interacting with flux in the cluster
reconcile:
	flux reconcile kustomization flux-system --with-source
	flux reconcile kustomization policies
	flux reconcile kustomization nodes
	flux reconcile kustomization crds
	flux reconcile kustomization system
	flux reconcile kustomization deployments
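
# Hypothetical helper (not in the original Makefile): reconcile a single
# kustomization by name, e.g. `make reconcile-one K=deployments`.
# K is an illustrative variable name.
reconcile-one:
	flux reconcile kustomization $(K) --with-source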
# targets to show logs for main cluster components
logs-flux:
	kubectl logs -f -n flux deployment/flux

logs-nginx:
	kubectl logs -f -n infra deployment/infra-ingress-nginx-controller

logs-certmanager:
	kubectl logs -f -n infra deployment/infra-cert-manager

logs-external-dns:
	kubectl logs -f -n infra deployment/infra-external-dns

logs-oauth2-proxy:
	kubectl logs -f -n infra deployment/infra-oauth2-proxy

logs-sealed-secrets:
	kubectl logs -f -n infra deployment/infra-sealed-secrets

logs-autoscaler:
	kubectl logs -f -n autoscaler deployment/cluster-autoscaler-aws-cluster-autoscaler
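
# A minimal sketch (not part of the original Makefile): tail logs for an
# arbitrary deployment, e.g. `make logs NS=infra DEPLOY=infra-ingress-nginx-controller`.
# NS and DEPLOY are illustrative variable names.
logs:
	kubectl logs -f -n $(NS) deployment/$(DEPLOY)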
# target to help with debugging
watch-helm-releases:
	watch helm list -A
# targets that help during an upgrade
# pluto and kubedd report Kubernetes APIs that are deprecated or removed in the target version
detect-needed-api-upgrades:
	pluto detect-helm -o wide --ignore-deprecations -r || true
	kubedd --target-kubernetes-version=$(NextKubeVersion) --source-kubernetes-version=$(KubeVersion) -d clusters,deployments,kustomizations,system || true
	kubedd --target-kubernetes-version=$(NextKubeVersion) || true

watch-nodes:
	watch -w -n0.5 'kubectl get nodes -o custom-columns=NAME:.metadata.name,STATUS:.status.conditions[-1].type,CREATED:.metadata.creationTimestamp,PROVIDERID:.spec.providerID,PROJECT:.metadata.labels.elifesciences\\.org/project,NODEPOOL:.metadata.labels.karpenter\\.sh/nodepool,CAPACITYTYPE:.metadata.labels.karpenter\\.sh/capacity-type,INSTANCETYPE:.metadata.labels.beta\\.kubernetes\\.io/instance-type,ZONE:.metadata.labels.topology\\.kubernetes\\.io/zone | sort -k6b,6'

watch-nodes-upgrading:
	watch -t "kubectl get nodes --sort-by=.metadata.name -o custom-columns=NAME:.metadata.name,INSTANCE:.spec.providerID,VERSION:.status.nodeInfo.kubeletVersion,READY?:.status.conditions[3].status"

watch-pods-upgrading:
	watch "kubectl get pods -A --sort-by=.spec.nodeName -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,READY?:.status.conditions[*].status,NODE:.spec.nodeName,IMAGE:.status.containerStatuses[*].image"

watch-pods-nodes-upgrading:
	watch "kubectl get pods -A --sort-by=.spec.nodeName -o custom-columns=NAMESPACE:.metadata.namespace,NAME:.metadata.name,READY?:.status.conditions[*].status,NODE:.spec.nodeName"