Use a variable for standardizing kubectl invocation (kubernetes-sigs#8329)

* Add kubectl variable

* Replace kubectl usage with the kubectl variable in roles

* Remove redundant --kubeconfig on kubectl usage

* Replace unnecessary shell usage with command
VannTen authored Jan 5, 2022
1 parent 3eab112 commit cb54eb4
Showing 25 changed files with 73 additions and 55 deletions.
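The change is mechanical across the touched roles: the kubectl binary path and the --kubeconfig flag move into a single kubectl variable defined in kubespray-defaults, and tasks reference that variable instead of spelling out the full invocation. A minimal sketch of the resulting pattern; the example task below is illustrative and not part of this diff:

# roles/kubespray-defaults/defaults/main.yaml (added by this commit)
kubectl: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf"

# An illustrative task using the shared variable (not taken from the diff)
- name: Example | List node names via the kubectl variable
  command: "{{ kubectl }} get nodes -o name"
  register: node_names
  changed_when: false
  delegate_to: "{{ groups['kube_control_plane'][0] }}"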
2 changes: 1 addition & 1 deletion roles/kubernetes-apps/ansible/tasks/cleanup_dns.yml
@@ -1,6 +1,6 @@
---
- name: Kubernetes Apps | Register coredns deployment annotation `createdby`
shell: "{{ bin_dir }}/kubectl get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
command: "{{ kubectl }} get deploy -n kube-system coredns -o jsonpath='{ .spec.template.metadata.annotations.createdby }'"
register: createdby_annotation
changed_when: false
ignore_errors: true # noqa ignore-errors
4 changes: 2 additions & 2 deletions roles/kubernetes-apps/csi_driver/vsphere/tasks/main.yml
@@ -29,15 +29,15 @@
tags: vsphere-csi-driver

- name: vSphere CSI Driver | Generate a CSI secret manifest
command: "{{ bin_dir }}/kubectl create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
command: "{{ kubectl }} create secret generic vsphere-config-secret --from-file=csi-vsphere.conf={{ kube_config_dir }}/vsphere-csi-cloud-config -n kube-system --dry-run --save-config -o yaml"
register: vsphere_csi_secret_manifest
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: true
tags: vsphere-csi-driver

- name: vSphere CSI Driver | Apply a CSI secret manifest
command:
cmd: "{{ bin_dir }}/kubectl apply -f -"
cmd: "{{ kubectl }} apply -f -"
stdin: "{{ vsphere_csi_secret_manifest.stdout }}"
when: inventory_hostname == groups['kube_control_plane'][0]
no_log: true
@@ -10,17 +10,17 @@
- upgrade

- name: CephFS Provisioner | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ cephfs_provisioner_namespace }}
command: >
{{ kubectl }} delete namespace {{ cephfs_provisioner_namespace }}
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade

- name: CephFS Provisioner | Remove legacy storageclass
shell: |
{{ bin_dir }}/kubectl delete storageclass {{ cephfs_provisioner_storage_class }}
command: >
{{ kubectl }} delete storageclass {{ cephfs_provisioner_storage_class }}
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
@@ -10,17 +10,17 @@
- upgrade

- name: RBD Provisioner | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ rbd_provisioner_namespace }}
command: >
{{ kubectl }} delete namespace {{ rbd_provisioner_namespace }}
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
tags:
- upgrade

- name: RBD Provisioner | Remove legacy storageclass
shell: |
{{ bin_dir }}/kubectl delete storageclass {{ rbd_provisioner_storage_class }}
command: >
{{ kubectl }} delete storageclass {{ rbd_provisioner_storage_class }}
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
@@ -10,8 +10,8 @@
- upgrade

- name: Cert Manager | Remove legacy namespace
shell: |
{{ bin_dir }}/kubectl delete namespace {{ cert_manager_namespace }}
command: >
{{ kubectl }} delete namespace {{ cert_manager_namespace }}
ignore_errors: true # noqa ignore-errors
when:
- inventory_hostname == groups['kube_control_plane'][0]
@@ -12,7 +12,7 @@
run_once: true

- name: kube-router | Wait for kube-router pods to be ready
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=kube-router -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601 ignore-errors
register: pods_not_ready
until: pods_not_ready.stdout.find("kube-router")==-1
retries: 30
2 changes: 1 addition & 1 deletion roles/kubernetes/control-plane/tasks/kubeadm-setup.yml
@@ -190,7 +190,7 @@

# FIXME(mattymo): from docs: If you don't want to taint your control-plane node, set this field to an empty slice, i.e. `taints: {}` in the YAML file.
- name: kubeadm | Remove taint for master with node role
command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf taint node {{ inventory_hostname }} {{ item }}"
command: "{{ kubectl }} taint node {{ inventory_hostname }} {{ item }}"
delegate_to: "{{ first_kube_control_plane }}"
with_items:
- "node-role.kubernetes.io/master:NoSchedule-"
3 changes: 1 addition & 2 deletions roles/kubernetes/control-plane/tasks/kubeadm-upgrade.yml
@@ -61,8 +61,7 @@
# FIXME: https://github.com/kubernetes/kubeadm/issues/1318
- name: kubeadm | scale down coredns replicas to 0 if not using coredns dns_mode
command: >-
{{ bin_dir }}/kubectl
--kubeconfig {{ kube_config_dir }}/admin.conf
{{ kubectl }}
-n kube-system
scale deployment/coredns --replicas 0
register: scale_down_coredns
6 changes: 3 additions & 3 deletions roles/kubernetes/kubeadm/tasks/main.yml
@@ -115,9 +115,9 @@
# incorrectly to first master, creating SPoF.
- name: Update server field in kube-proxy kubeconfig
shell: >-
set -o pipefail && {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf get configmap kube-proxy -n kube-system -o yaml
set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
| sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
| {{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf replace -f -
| {{ kubectl }} replace -f -
args:
executable: /bin/bash
run_once: true
@@ -139,7 +139,7 @@
mode: "0644"

- name: Restart all kube-proxy pods to ensure that they load the new configmap
command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"
run_once: true
delegate_to: "{{ groups['kube_control_plane']|first }}"
delegate_facts: false
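The "Replace unnecessary shell usage with command" part of the commit is visible above: one-shot kubectl invocations move to the command module, while the kube-proxy configmap task keeps shell because it pipes kubectl output through sed. A short sketch of that distinction, with illustrative task names that are not taken from this diff:

# Pipes need a real shell, so keep the shell module with pipefail
- name: Example | Rewrite the kube-proxy server address through a pipeline
  shell: >-
    set -o pipefail && {{ kubectl }} get configmap kube-proxy -n kube-system -o yaml
    | sed 's#server:.*#server: https://127.0.0.1:{{ kube_apiserver_port }}#g'
    | {{ kubectl }} replace -f -
  args:
    executable: /bin/bash

# A single invocation has no shell features, so the command module is enough
- name: Example | Restart kube-proxy pods
  command: "{{ kubectl }} delete pod -n kube-system -l k8s-app=kube-proxy --force --grace-period=0"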
2 changes: 1 addition & 1 deletion roles/kubernetes/node-label/tasks/main.yml
@@ -42,7 +42,7 @@

- name: Set label to node
command: >-
{{ bin_dir }}/kubectl label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
{{ kubectl }} label node {{ kube_override_hostname | default(inventory_hostname) }} {{ item }} --overwrite=true
loop: "{{ role_node_labels + inventory_node_labels }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
changed_when: false
4 changes: 4 additions & 0 deletions roles/kubespray-defaults/defaults/main.yaml
@@ -138,6 +138,10 @@ kube_config_dir: /etc/kubernetes
kube_script_dir: "{{ bin_dir }}/kubernetes-scripts"
kube_manifest_dir: "{{ kube_config_dir }}/manifests"

# Kubectl command
# This is for consistency when using the kubectl command in roles, and ensures the admin kubeconfig is always used
kubectl: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf"

# This is where all the cert scripts and certs will be located
kube_cert_dir: "{{ kube_config_dir }}/ssl"

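Since kubectl is defined in the kubespray-defaults role defaults, which sit at the lowest variable precedence, an inventory can override it without touching any role, for instance to point at a different kubeconfig. A hypothetical group_vars override (the path is an assumption, not part of this commit):

# inventory/mycluster/group_vars/k8s_cluster/k8s-cluster.yml (hypothetical override)
kubectl: "{{ bin_dir }}/kubectl --kubeconfig /etc/kubernetes/admin-alt.conf"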
2 changes: 1 addition & 1 deletion roles/network_plugin/calico/tasks/pre.yml
@@ -19,7 +19,7 @@

- name: Calico | Get kubelet hostname
shell: >-
set -o pipefail && {{ bin_dir }}/kubectl get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
set -o pipefail && {{ kubectl }} get node -o custom-columns='NAME:.metadata.name,INTERNAL-IP:.status.addresses[?(@.type=="InternalIP")].address'
| egrep "{{ ansible_all_ipv4_addresses | join('$|') }}$" | cut -d" " -f1
args:
executable: /bin/bash
4 changes: 2 additions & 2 deletions roles/network_plugin/calico/tasks/typha_certs.yml
@@ -1,6 +1,6 @@
---
- name: Calico | Check if typha-server exists
command: "{{ bin_dir }}/kubectl -n kube-system get secret typha-server"
command: "{{ kubectl }} -n kube-system get secret typha-server"
register: typha_server_secret
changed_when: false
failed_when: false
@@ -35,7 +35,7 @@

- name: Calico | Create typha tls secrets
command: >-
{{ bin_dir }}/kubectl -n kube-system
{{ kubectl }} -n kube-system
create secret tls {{ item.name }}
--cert {{ item.cert }}
--key {{ item.key }}
2 changes: 1 addition & 1 deletion roles/network_plugin/cilium/tasks/apply.yml
@@ -11,7 +11,7 @@
when: inventory_hostname == groups['kube_control_plane'][0] and not item is skipped

- name: Cilium | Wait for pods to run
command: "{{ bin_dir }}/kubectl -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
command: "{{ kubectl }} -n kube-system get pods -l k8s-app=cilium -o jsonpath='{.items[?(@.status.containerStatuses[0].ready==false)].metadata.name}'" # noqa 601
register: pods_not_ready
until: pods_not_ready.stdout.find("cilium")==-1
retries: 30
2 changes: 1 addition & 1 deletion roles/network_plugin/kube-ovn/tasks/main.yml
@@ -1,7 +1,7 @@
---
- name: Kube-OVN | Label ovn-db node
command: >-
{{ bin_dir }}/kubectl label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
{{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} kube-ovn/role=master
when:
- inventory_hostname == groups['kube_control_plane'][0]

6 changes: 3 additions & 3 deletions roles/network_plugin/kube-router/tasks/annotate.yml
@@ -1,20 +1,20 @@
---
- name: kube-router | Add annotations on kube_control_plane
command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
with_items:
- "{{ kube_router_annotations_master }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: kube_router_annotations_master is defined and inventory_hostname in groups['kube_control_plane']

- name: kube-router | Add annotations on kube_node
command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
with_items:
- "{{ kube_router_annotations_node }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when: kube_router_annotations_node is defined and inventory_hostname in groups['kube_node']

- name: kube-router | Add common annotations on all servers
command: "{{ bin_dir }}/kubectl annotate --overwrite node {{ ansible_hostname }} {{ item }}"
command: "{{ kubectl }} annotate --overwrite node {{ ansible_hostname }} {{ item }}"
with_items:
- "{{ kube_router_annotations_all }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
2 changes: 1 addition & 1 deletion roles/network_plugin/macvlan/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Macvlan | Retrieve Pod Cidr
command: "{{ bin_dir }}/kubectl get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
command: "{{ kubectl }} get nodes {{ kube_override_hostname | default(inventory_hostname) }} -o jsonpath='{.spec.podCIDR}'"
changed_when: false
register: node_pod_cidr_cmd
delegate_to: "{{ groups['kube_control_plane'][0] }}"
15 changes: 15 additions & 0 deletions roles/network_plugin/ovn4nfv/tasks/main.yml
@@ -0,0 +1,15 @@
---
- name: ovn4nfv | Label control-plane node
command: >-
{{ kubectl }} label --overwrite node {{ groups['kube_control_plane'] | first }} ovn4nfv-k8s-plugin=ovn-control-plane
when:
- inventory_hostname == groups['kube_control_plane'][0]

- name: ovn4nfv | Create ovn4nfv-k8s manifests
template:
src: "{{ item.file }}.j2"
dest: "{{ kube_config_dir }}/{{ item.file }}"
with_items:
- {name: ovn-daemonset, file: ovn-daemonset.yml}
- {name: ovn4nfv-k8s-plugin, file: ovn4nfv-k8s-plugin.yml}
register: ovn4nfv_node_manifests
4 changes: 2 additions & 2 deletions roles/recover_control_plane/control-plane/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Wait for apiserver
command: "{{ bin_dir }}/kubectl get nodes"
command: "{{ kubectl }} get nodes"
environment:
- KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
register: apiserver_is_ready
@@ -11,7 +11,7 @@
when: groups['broken_kube_control_plane']

- name: Delete broken kube_control_plane nodes from cluster
command: "{{ bin_dir }}/kubectl delete node {{ item }}"
command: "{{ kubectl }} delete node {{ item }}"
environment:
- KUBECONFIG: "{{ ansible_env.HOME | default('/root') }}/.kube/config"
with_items: "{{ groups['broken_kube_control_plane'] }}"
2 changes: 1 addition & 1 deletion roles/remove-node/post-remove/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Delete node
command: "{{ bin_dir }}/kubectl delete node {{ kube_override_hostname|default(inventory_hostname) }}"
command: "{{ kubectl }} delete node {{ kube_override_hostname|default(inventory_hostname) }}"
delegate_to: "{{ groups['kube_control_plane']|first }}"
when: inventory_hostname in groups['k8s_cluster']
retries: 10
4 changes: 2 additions & 2 deletions roles/remove-node/pre-remove/tasks/main.yml
@@ -1,15 +1,15 @@
---
- name: remove-node | List nodes
command: >-
{{ bin_dir }}/kubectl get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %}
{{ kubectl }} get nodes -o go-template={% raw %}'{{ range .items }}{{ .metadata.name }}{{ "\n" }}{{ end }}'{% endraw %}
register: nodes
delegate_to: "{{ groups['kube_control_plane']|first }}"
changed_when: false
run_once: true

- name: remove-node | Drain node except daemonsets resource # noqa 301
command: >-
{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf drain
{{ kubectl }} drain
--force
--ignore-daemonsets
--grace-period {{ drain_grace_period }}
6 changes: 3 additions & 3 deletions roles/remove-node/remove-etcd-node/tasks/main.yml
@@ -1,8 +1,8 @@
---
- name: Lookup node IP in kubernetes
shell: >-
{{ bin_dir }}/kubectl get nodes {{ node }}
-o jsonpath='{range.status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end}'
command: >
{{ kubectl }} get nodes {{ node }}
-o jsonpath={range.status.addresses[?(@.type=="InternalIP")]}{.address}{"\n"}{end}
register: remove_node_ip
when:
- inventory_hostname in groups['etcd']
4 changes: 2 additions & 2 deletions roles/upgrade/post-upgrade/tasks/main.yml
@@ -4,15 +4,15 @@
- needs_cordoning|default(false)
- kube_network_plugin == 'cilium'
command: >
{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf
{{ kubectl }}
wait pod -n kube-system -l k8s-app=cilium
--field-selector 'spec.nodeName=={{ kube_override_hostname|default(inventory_hostname) }}'
--for=condition=Ready
--timeout={{ upgrade_post_cilium_wait_timeout }}
delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: Uncordon node
command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
command: "{{ kubectl }} uncordon {{ kube_override_hostname|default(inventory_hostname) }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"
when:
- needs_cordoning|default(false)
22 changes: 11 additions & 11 deletions roles/upgrade/pre-upgrade/tasks/main.yml
@@ -17,9 +17,9 @@
# Node Ready: type = ready, status = True
# Node NotReady: type = ready, status = Unknown
- name: See if node is in ready state
shell: >-
{{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
-o jsonpath='{ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }'
command: >
{{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
-o jsonpath={ range .status.conditions[?(@.type == "Ready")].status }{ @ }{ end }
register: kubectl_node_ready
delegate_to: "{{ groups['kube_control_plane'][0] }}"
failed_when: false
@@ -28,9 +28,9 @@
# SchedulingDisabled: unschedulable = true
# else unschedulable key doesn't exist
- name: See if node is schedulable
shell: >-
{{ bin_dir }}/kubectl get node {{ kube_override_hostname|default(inventory_hostname) }}
-o jsonpath='{ .spec.unschedulable }'
command: >
{{ kubectl }} get node {{ kube_override_hostname|default(inventory_hostname) }}
-o jsonpath={ .spec.unschedulable }
register: kubectl_node_schedulable
delegate_to: "{{ groups['kube_control_plane'][0] }}"
failed_when: false
@@ -48,11 +48,11 @@
- name: Node draining
block:
- name: Cordon node
command: "{{ bin_dir }}/kubectl cordon {{ kube_override_hostname|default(inventory_hostname) }}"
command: "{{ kubectl }} cordon {{ kube_override_hostname|default(inventory_hostname) }}"
delegate_to: "{{ groups['kube_control_plane'][0] }}"

- name: Check kubectl version
command: "{{ bin_dir }}/kubectl version --client --short"
command: "{{ kubectl }} version --client --short"
register: kubectl_version
delegate_to: "{{ groups['kube_control_plane'][0] }}"
run_once: yes
Expand All @@ -70,7 +70,7 @@

- name: Drain node
command: >-
{{ bin_dir }}/kubectl drain
{{ kubectl }} drain
--force
--ignore-daemonsets
--grace-period {{ hostvars['localhost']['drain_grace_period_after_failure'] | default(drain_grace_period) }}
@@ -98,7 +98,7 @@

- name: Drain node - fallback with disabled eviction
command: >-
{{ bin_dir }}/kubectl drain
{{ kubectl }} drain
--force
--ignore-daemonsets
--grace-period {{ drain_fallback_grace_period }}
@@ -117,7 +117,7 @@

rescue:
- name: Set node back to schedulable
command: "{{ bin_dir }}/kubectl --kubeconfig {{ kube_config_dir }}/admin.conf uncordon {{ inventory_hostname }}"
command: "{{ kubectl }} uncordon {{ inventory_hostname }}"
when: upgrade_node_uncordon_after_drain_failure
- name: Fail after rescue
fail: