From 3030e32cd0052981122fb74abdf55cc5879fdc0f Mon Sep 17 00:00:00 2001 From: Julien DOCHE Date: Sat, 12 Dec 2020 22:21:20 +0100 Subject: [PATCH 001/108] Support HA mode with embedded DB This enables initializing a cluster in HA mode with an embedded DB. https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/ When multiple masters are specified in the master group, k3s-ansible will add the necessary flags during the initialization phase. (i.e. --cluster-init and --server) For the embedded HA mode to work the k3s version must be >= v1.19.1 Signed-off-by: Julien DOCHE --- README.md | 5 ++++ requirements.txt | 1 + roles/k3s/master/defaults/main.yml | 11 ++++++++ roles/k3s/master/tasks/main.yml | 43 ++++++++++++++++++++++++++++++ roles/reset/tasks/main.yml | 1 + 5 files changed, 61 insertions(+) create mode 100644 requirements.txt create mode 100644 roles/k3s/master/defaults/main.yml diff --git a/README.md b/README.md index 8e3c855ce..c3a56925c 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,7 @@ Master and nodes must have passwordless SSH access First create a new directory based on the `sample` directory within the `inventory` directory: ```bash +pip install -r requirements.txt cp -R inventory/sample inventory/my-cluster ``` @@ -43,6 +44,10 @@ master node ``` +If multiple hosts are in the master group, the playbook will automatically setup k3s in HA mode with etcd. +https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/ +This requires at least k3s version 1.19.1 + If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment. Start provisioning of the cluster using the following command: diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 000000000..45c1e038e --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +jmespath diff --git a/roles/k3s/master/defaults/main.yml b/roles/k3s/master/defaults/main.yml new file mode 100644 index 000000000..df81d38c9 --- /dev/null +++ b/roles/k3s/master/defaults/main.yml @@ -0,0 +1,11 @@ +--- +ansible_user: root +server_init_args: >- + {% if groups['master'] | length > 1 %} + {% if ansible_host == groups['master'][0] %} + --cluster-init + {% else %} + --server https://{{ groups['master'][0] }}:6443 + {% endif %} + {% endif %} + {{ extra_server_args | default('') }} diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 006aa9b84..8fcf68de5 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -1,4 +1,47 @@ --- +- name: Clean previous runs of k3s-init + systemd: + name: k3s-init + state: stopped + failed_when: false + +- name: Clean previous runs of k3s-init + command: systemctl reset-failed k3s-init + failed_when: false + changed_when: false + args: + warn: false # The ansible systemd module does not support reset-failed + +- name: Init cluster inside the transient k3s-init service + command: + cmd: "systemd-run -p RestartSec=2i \ + -p Restart=on-failure \ + -E K3S_TOKEN={{ hostvars[groups['master'][0]]['token'] }} \ + --unit=k3s-init \ + k3s server {{ server_init_args }}" + creates: "{{ systemd_dir }}/k3s.service" + args: + warn: false # The ansible systemd module does not support transient units + +- name: Verification + block: + - name: Verify that all nodes actually joined + command: + cmd: k3s kubectl get --raw /api/v1/nodes/ + creates: "{{ systemd_dir }}/k3s.service" + register: nodes + until: nodes.rc == 0 and + ((nodes.stdout | from_json)['items'] | + 
json_query('[*].metadata.labels."node-role.kubernetes.io/master"') | count) == (groups['master'] | length) + retries: 20 + delay: 10 + changed_when: false + always: + - name: Kill the temporary service used for initialization + systemd: + name: k3s-init + state: stopped + failed_when: false - name: Copy K3s service file register: k3s_service diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 38560c574..8d81d710f 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -8,6 +8,7 @@ with_items: - k3s - k3s-node + - k3s-init - name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc" register: pkill_containerd_shim_runc From bb0e3c94df56cd40b768c6cf7d3b74ae2d05572d Mon Sep 17 00:00:00 2001 From: Julien DOCHE Date: Sun, 14 Feb 2021 21:26:54 +0100 Subject: [PATCH 002/108] Add custom configuration of the apiserver endpoint for HA mode This replaces the `master_ip` var by `apiserver_endpoint` for genericity. The init service is deployed only when k3s.service is not present on the machine to ensure idempotence. Signed-off-by: Julien DOCHE --- inventory/sample/group_vars/all.yml | 10 +++++++++- roles/k3s/master/defaults/main.yml | 4 ++-- roles/k3s/master/tasks/main.yml | 6 +++--- roles/k3s/node/templates/k3s.service.j2 | 2 +- 4 files changed, 15 insertions(+), 7 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 865b76ab5..3944fce08 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -2,6 +2,14 @@ k3s_version: v1.17.5+k3s1 ansible_user: debian systemd_dir: /etc/systemd/system -master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" + +# If you define multiple masters you should be providing a loadbalanced +# apiserver endpoint to all masters here. This default value is only suitable +# for a non-HA setup, if used in a HA setup, it will not protect you if the +# first node fails. 
+ +apiserver_endpoint: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" + extra_server_args: "" extra_agent_args: "" +k3s_token: "" diff --git a/roles/k3s/master/defaults/main.yml b/roles/k3s/master/defaults/main.yml index df81d38c9..84472377e 100644 --- a/roles/k3s/master/defaults/main.yml +++ b/roles/k3s/master/defaults/main.yml @@ -2,10 +2,10 @@ ansible_user: root server_init_args: >- {% if groups['master'] | length > 1 %} - {% if ansible_host == groups['master'][0] %} + {% if ansible_host == hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) %} --cluster-init {% else %} - --server https://{{ groups['master'][0] }}:6443 + --server https://{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}:6443 {% endif %} {% endif %} {{ extra_server_args | default('') }} diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 8fcf68de5..7b07333e0 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -14,9 +14,9 @@ - name: Init cluster inside the transient k3s-init service command: - cmd: "systemd-run -p RestartSec=2i \ + cmd: "systemd-run -p RestartSec=2 \ -p Restart=on-failure \ - -E K3S_TOKEN={{ hostvars[groups['master'][0]]['token'] }} \ + -E K3S_TOKEN={{ k3s_token }} \ --unit=k3s-init \ k3s server {{ server_init_args }}" creates: "{{ systemd_dir }}/k3s.service" @@ -105,7 +105,7 @@ - name: Replace https://localhost:6443 by https://master-ip:6443 command: >- k3s kubectl config set-cluster default - --server=https://{{ master_ip }}:6443 + --server=https://{{ apiserver_endpoint }}:6443 --kubeconfig ~{{ ansible_user }}/.kube/config changed_when: true diff --git a/roles/k3s/node/templates/k3s.service.j2 b/roles/k3s/node/templates/k3s.service.j2 index 99a0ac3d0..f3854676f 100644 --- a/roles/k3s/node/templates/k3s.service.j2 +++ b/roles/k3s/node/templates/k3s.service.j2 @@ -7,7 +7,7 @@ After=network-online.target Type=notify ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe overlay -ExecStart=/usr/local/bin/k3s agent --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['master'][0]]['token'] }} {{ extra_agent_args | default("") }} +ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint }}:6443 --token {{ k3s_token }} {{ extra_agent_args | default("") }} KillMode=process Delegate=yes # Having non-zero Limit*s causes performance problems due to accounting overhead From dce68494444aaa8da678588f73052c47e3b5bd85 Mon Sep 17 00:00:00 2001 From: Julien DOCHE Date: Sat, 13 Mar 2021 14:42:35 +0100 Subject: [PATCH 003/108] Fix and improve master registration verification Signed-off-by: Julien DOCHE --- README.md | 1 - requirements.txt | 1 - roles/k3s/master/tasks/main.yml | 7 ++----- 3 files changed, 2 insertions(+), 7 deletions(-) delete mode 100644 requirements.txt diff --git a/README.md b/README.md index c3a56925c..217133083 100644 --- a/README.md +++ b/README.md @@ -26,7 +26,6 @@ Master and nodes must have passwordless SSH access First create a new directory based on the `sample` directory within the `inventory` directory: ```bash -pip install -r requirements.txt cp -R inventory/sample inventory/my-cluster ``` diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 45c1e038e..000000000 --- a/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -jmespath diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 7b07333e0..990528e25 100644 --- 
a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -27,12 +27,9 @@ block: - name: Verify that all nodes actually joined command: - cmd: k3s kubectl get --raw /api/v1/nodes/ - creates: "{{ systemd_dir }}/k3s.service" + cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}" register: nodes - until: nodes.rc == 0 and - ((nodes.stdout | from_json)['items'] | - json_query('[*].metadata.labels."node-role.kubernetes.io/master"') | count) == (groups['master'] | length) + until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['master'] | length) retries: 20 delay: 10 changed_when: false From 5816c03fc467888a79ffe15c85cd8ebd3e1cbd44 Mon Sep 17 00:00:00 2001 From: Julien DOCHE Date: Sat, 13 Mar 2021 18:28:04 +0100 Subject: [PATCH 004/108] Rename kubectl set-cluster task Signed-off-by: Julien DOCHE --- roles/k3s/master/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 990528e25..89e613917 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -99,7 +99,7 @@ owner: "{{ ansible_user }}" mode: "u=rw,g=,o=" -- name: Replace https://localhost:6443 by https://master-ip:6443 +- name: Configure kubectl cluster to https://{{ apiserver_endpoint }}:6443 command: >- k3s kubectl config set-cluster default --server=https://{{ apiserver_endpoint }}:6443 From 59b20d4399f9ae56b28501d8ddf013de398e57c7 Mon Sep 17 00:00:00 2001 From: Julien DOCHE Date: Tue, 23 Mar 2021 23:44:39 +0100 Subject: [PATCH 005/108] Hint for k3s-init.service if initialization fails Signed-off-by: Julien DOCHE --- roles/k3s/master/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 89e613917..45e6d1dd4 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -25,7 +25,7 @@ - name: Verification block: - - name: Verify that all nodes actually joined + - name: Verify that all nodes actually joined (check k3s-init.service if this fails) command: cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}" register: nodes From 4ed559944262d039e9c5b114b2d6b844f243eb0c Mon Sep 17 00:00:00 2001 From: Julien DOCHE Date: Mon, 29 Mar 2021 22:32:38 +0200 Subject: [PATCH 006/108] Fix k3s_token default value Signed-off-by: Julien DOCHE --- inventory/sample/group_vars/all.yml | 3 ++- roles/k3s/master/defaults/main.yml | 1 + roles/k3s/master/tasks/main.yml | 1 - roles/k3s/node/templates/k3s.service.j2 | 2 +- 4 files changed, 4 insertions(+), 3 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 3944fce08..7d99d6008 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -7,9 +7,10 @@ systemd_dir: /etc/systemd/system # apiserver endpoint to all masters here. This default value is only suitable # for a non-HA setup, if used in a HA setup, it will not protect you if the # first node fails. 
+# Also you should define k3s_token so that masters can talk together securely apiserver_endpoint: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" +# k3s_token: "mysupersecuretoken" extra_server_args: "" extra_agent_args: "" -k3s_token: "" diff --git a/roles/k3s/master/defaults/main.yml b/roles/k3s/master/defaults/main.yml index 84472377e..596c9cb58 100644 --- a/roles/k3s/master/defaults/main.yml +++ b/roles/k3s/master/defaults/main.yml @@ -7,5 +7,6 @@ server_init_args: >- {% else %} --server https://{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}:6443 {% endif %} + --token {{ k3s_token }} {% endif %} {{ extra_server_args | default('') }} diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 45e6d1dd4..704c62bae 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -16,7 +16,6 @@ command: cmd: "systemd-run -p RestartSec=2 \ -p Restart=on-failure \ - -E K3S_TOKEN={{ k3s_token }} \ --unit=k3s-init \ k3s server {{ server_init_args }}" creates: "{{ systemd_dir }}/k3s.service" diff --git a/roles/k3s/node/templates/k3s.service.j2 b/roles/k3s/node/templates/k3s.service.j2 index f3854676f..01baa64ed 100644 --- a/roles/k3s/node/templates/k3s.service.j2 +++ b/roles/k3s/node/templates/k3s.service.j2 @@ -7,7 +7,7 @@ After=network-online.target Type=notify ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe overlay -ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint }}:6443 --token {{ k3s_token }} {{ extra_agent_args | default("") }} +ExecStart=/usr/local/bin/k3s agent --server https://{{ apiserver_endpoint }}:6443 --token {{ hostvars[groups['master'][0]]['token'] | default(k3s_token) }} {{ extra_agent_args | default("") }} KillMode=process Delegate=yes # Having non-zero Limit*s causes performance problems due to accounting overhead From 21a4ba935ca75a7b77e3e1c51d26c70bb7b6a287 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 12 Dec 2021 21:41:41 -0500 Subject: [PATCH 007/108] FIX #159: Updated roles/reset to perform all actions as does k3s-uninstall.sh, from https://get.k3s.io Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/all.yml | 13 +- roles/reset/tasks/main.yml | 198 ++++++++++++++++++--- roles/reset/tasks/umount_with_children.yml | 7 + 3 files changed, 196 insertions(+), 22 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index ada7200da..a8ad23303 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -1,7 +1,18 @@ --- k3s_version: v1.22.3+k3s1 ansible_user: debian -systemd_dir: /etc/systemd/system master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" extra_server_args: "" extra_agent_args: "" + +# Services information +systemd_dir: /etc/systemd/system + +k3s_services: + - k3s + - k3s-node + +k3s_service_file_extensions: + - service + - service.env + diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 728447fbb..5c5bf4499 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -1,42 +1,198 @@ --- + +# TODO: Should we use k3s-uninstall.sh from https://get/k3s.io instead so that +# we don't have to track the updates in the future? 
+ +########################### +# Start of k3s-killall.sh # +########################### + +# +# for service in /etc/systemd/system/k3s*.service; do +# [ -s $service ] && systemctl stop $(basename $service) +# done +# - name: Disable services systemd: name: "{{ item }}" state: stopped enabled: no failed_when: false - with_items: - - k3s - - k3s-node - -- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc" - register: pkill_containerd_shim_runc - command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim-runc" - changed_when: "pkill_containerd_shim_runc.rc == 0" + loop: "{{ k3s_services }}" + +# +# killtree $({ set +x; } 2>/dev/null; getshims; set -x) +# +- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim" + register: pkill_containerd_shim + command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim" + changed_when: "pkill_containerd_shim.rc == 0" failed_when: false -- name: Umount k3s filesystems +# +# do_unmount_and_remove [the list] +# +- name: Umount k3s filesystems and remove mount points include_tasks: umount_with_children.yml - with_items: + loop: - /run/k3s - - /var/lib/kubelet - - /run/netns - /var/lib/rancher/k3s + - /var/lib/kubelet/pods + - /var/lib/kubelet/plugins + - /run/netns/cni- loop_control: loop_var: mounted_fs -- name: Remove service files, binaries and data +# +# Remove CNI namespaces +# ip netns show 2> /dev/null | grep cni- | xargs -r -t -n 1 ip netns delete +# +- name: Show CNI namespaces + register: ip_netns_show + command: ip -j netns show master cni0 + +- name: Remove CNI namespaces + command: ip netns delete {{ item }} + loop: "{{ ip_netns_show.stdout | from_json | json_query('[*].name') }}" + +# +# Remove CNI interfaces +# ip link show 2>/dev/null | grep 'master cni0' +# +# BUG: Possible bug in ip-link(8). +# "ip -j link show master cni0" exits 255 when cni0 does not exist where +# "ip -j netns show master cni0" returns "[ ]", which is preferred. 
+# +- name: Get list of network interface(s) that match 'master cni0' + register: ip_link_show + shell: ip -j link show master cni0 || echo "[ ]" + +- name: Remove CNI interfaces + command: ip link delete {{ item }} + loop: "{{ ip_link_show.stdout | from_json | json_query('[*].ifname') }}" + +# +# Remove other interfaces and files +# +- name: Remove other interfaces + command: ip link delete {{ item }} + when: item in ansible_interfaces + loop: + - cni0 + - flannel.1 + - flannel-v6.1 + +- name: Remove CNI files file: - name: "{{ item }}" + name: /var/lib/cni state: absent - with_items: - - /usr/local/bin/k3s - - "{{ systemd_dir }}/k3s.service" - - "{{ systemd_dir }}/k3s-node.service" - - /etc/rancher/k3s - - /var/lib/kubelet - - /var/lib/rancher/k3s + +# +# iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore +# TODO: Replace with appropriate ansible module +# +- name: Get list of KUBE- chains + register: iptables_save_restore + shell: iptables-save | egrep -v '(KUBE|CNI)-' | iptables-restore + +######################### +# End of k3s-killall.sh # +######################### + +# +# systemctl disable k3s +# systemctl reset-failed k3s +# systemctl daemon-reload +# +- name: Disable services + systemd: + name: "{{ item }}" + state: stopped + enabled: no + failed_when: false + loop: "{{ k3s_services }}" - name: daemon_reload systemd: daemon_reload: yes + +# +# rm -f /etc/systemd/system/k3s.service +# rm -f /etc/systemd/system/k3s.service.env +# +- name: Remove service files + file: + name: "{{ systemd_dir }}/{{ item }}" + state: absent + loop: "{{ k3s_services | product(k3s_service_file_extensions) | map('join', '.') }}" + +# +# if (ls /etc/systemd/system/k3s*.service || ls /etc/init.d/k3s*) >/dev/null 2>&1; then +# set +x; echo 'Additional k3s services installed, skipping uninstall of k3s'; set -x +# exit +# fi +# +# Should this be done rather than focusing on a discrete list of services (k3s_services)? 
+ +# +# for cmd in kubectl crictl ctr; do +# if [ -L /usr/local/bin/$cmd ]; then +# rm -f /usr/local/bin/$cmd +# fi +# done +# +- name: Command files + register: stat_command_files + stat: + name: "/usr/local/bin/{{ item }}" + loop: + - kubectl + - crictl + - ctr + +- name: Remove command files + file: + name: "{{ item.stat.path }}" + state: absent + when: (item.stat.exists | default(false)) and (item.stat.islnk | default(false)) + loop: "{{ stat_command_files.results }}" + loop_control: + label: "{{ item.item }}" + +# Remove files and directories +- name: Remove files, binaries and data + file: + name: "{{ item }}" + state: absent + loop: + - /etc/rancher/k3s + - /run/k3s + - /run/flannel + - /var/lib/rancher/k3s + - /var/lib/kubelet + - /usr/local/bin/k3s + - /usr/local/bin/k3s-killall.sh + - /usr/local/bin/k3s-uninstall.sh + +- name: Remove package k3s-selinux + yum: + name: k3s-selinux + state: remove + when: ansible_pkg_mgr == "yum" + +- name: Remove package k3s-selinux + zypper: + name: k3s-selinux + state: remove + when: ansible_pkg_mgr == "zypper" + +- name: Remove yum repo files + shell: 'rm -f /etc/yum.repos.d/rancher-k3s-common*.repo' + register: remove_repo_files + when: ansible_pkg_mgr == "yum" + +- name: Remove zypper repo files + shell: 'rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo' + register: remove_repo_files + when: ansible_pkg_mgr == "zypper" + diff --git a/roles/reset/tasks/umount_with_children.yml b/roles/reset/tasks/umount_with_children.yml index 5883b70a6..fe68ab36c 100644 --- a/roles/reset/tasks/umount_with_children.yml +++ b/roles/reset/tasks/umount_with_children.yml @@ -14,3 +14,10 @@ state: unmounted with_items: "{{ get_mounted_filesystems.stdout_lines | reverse | list }}" + +- name: Remove mountpoints + file: + path: "{{ item }}" + state: absent + with_items: "{{ get_mounted_filesystems.stdout_lines | reverse | list }}" + From 99e8b382c86fcbedc893b1c795c564642e642b4a Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 14 Dec 2021 13:27:18 -0500 Subject: [PATCH 008/108] FIX #160: Moved k3s_server_location to group_vars/all.yml so roles/reset can use it Signed-off-by: Jon S. 
Stumpf --- inventory/sample/group_vars/all.yml | 2 ++ roles/k3s/master/defaults/main.yml | 2 -- roles/reset/tasks/main.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) delete mode 100644 roles/k3s/master/defaults/main.yml diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index a8ad23303..c82822334 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -5,6 +5,8 @@ master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['m extra_server_args: "" extra_agent_args: "" +k3s_server_location: /var/lib/rancher/k3s + # Services information systemd_dir: /etc/systemd/system diff --git a/roles/k3s/master/defaults/main.yml b/roles/k3s/master/defaults/main.yml deleted file mode 100644 index c56778f93..000000000 --- a/roles/k3s/master/defaults/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -k3s_server_location: /var/lib/rancher/k3s diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 5c5bf4499..cbe488a0b 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -36,7 +36,7 @@ include_tasks: umount_with_children.yml loop: - /run/k3s - - /var/lib/rancher/k3s + - "{{ k3s_server_location }}" - /var/lib/kubelet/pods - /var/lib/kubelet/plugins - /run/netns/cni- @@ -168,7 +168,7 @@ - /etc/rancher/k3s - /run/k3s - /run/flannel - - /var/lib/rancher/k3s + - "{{ k3s_server_location }}" - /var/lib/kubelet - /usr/local/bin/k3s - /usr/local/bin/k3s-killall.sh From b3e2df806aa2a3287375bccb60309f4b17942782 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 14 Dec 2021 17:14:40 -0500 Subject: [PATCH 009/108] FIX #161: Updated pkill to use k3s_server_location in roles/reset/tasks/main.yml Signed-off-by: Jon S. Stumpf --- roles/reset/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index cbe488a0b..086e21bb6 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -23,9 +23,9 @@ # # killtree $({ set +x; } 2>/dev/null; getshims; set -x) # -- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim" +- name: pkill -9 -f "{{ k3s_server_location }}/data/[^/]+/bin/containerd-shim" register: pkill_containerd_shim - command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim" + command: pkill -9 -f "{{ k3s_server_location }}/data/[^/]+/bin/containerd-shim" changed_when: "pkill_containerd_shim.rc == 0" failed_when: false From 01969545efe9501ef26864bf74181cb49965daf5 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 14 Dec 2021 21:56:16 -0500 Subject: [PATCH 010/108] Addressed errors introduced in last two commits due to k3s_server_location only used in k3s/master role Signed-off-by: Jon S. Stumpf --- roles/reset/tasks/main.yml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 086e21bb6..b62f69d6e 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -20,15 +20,28 @@ failed_when: false loop: "{{ k3s_services }}" +# +# killtree $({ set +x; } 2>/dev/null; getshims; set -x) +# TODO: Why is this different from k3s servers? 
+# +- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim" + register: pkill_containerd_shim + command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim" + when: inventory_hostname not in groups['master'] + changed_when: "pkill_containerd_shim.rc == 0" + failed_when: false + # # killtree $({ set +x; } 2>/dev/null; getshims; set -x) # - name: pkill -9 -f "{{ k3s_server_location }}/data/[^/]+/bin/containerd-shim" register: pkill_containerd_shim command: pkill -9 -f "{{ k3s_server_location }}/data/[^/]+/bin/containerd-shim" + when: inventory_hostname in groups['master'] changed_when: "pkill_containerd_shim.rc == 0" failed_when: false +# # # do_unmount_and_remove [the list] # @@ -36,7 +49,7 @@ include_tasks: umount_with_children.yml loop: - /run/k3s - - "{{ k3s_server_location }}" + - "{{ k3s_server_location if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" - /var/lib/kubelet/pods - /var/lib/kubelet/plugins - /run/netns/cni- @@ -168,7 +181,7 @@ - /etc/rancher/k3s - /run/k3s - /run/flannel - - "{{ k3s_server_location }}" + - "{{ k3s_server_location if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" - /var/lib/kubelet - /usr/local/bin/k3s - /usr/local/bin/k3s-killall.sh From 4357ad9b33138e1bd3ec33c13d6883cf7004c00f Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 14 Dec 2021 23:27:43 -0500 Subject: [PATCH 011/108] FIX #162: Fixed errors with symlink'd commands when k3s_server_location is changed from the default Signed-off-by: Jon S. Stumpf --- roles/k3s/master/tasks/main.yml | 34 ++++++++++++++++++---------- roles/k3s/master/templates/k3s.sh.j2 | 23 +++++++++++++++++++ roles/reset/tasks/main.yml | 1 + 3 files changed, 46 insertions(+), 12 deletions(-) create mode 100755 roles/k3s/master/templates/k3s.sh.j2 diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 77b58f60a..0fbd1cb13 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -59,21 +59,31 @@ owner: "{{ ansible_user }}" mode: "u=rw,g=,o=" +- name: Copy k3s.sh for symlink'd commands + register: k3s_symlink + template: + src: "k3s.sh.j2" + dest: "/usr/local/bin/k3s.sh" + owner: root + group: root + mode: 0755 + when: + - k3s_server_location is defined + - k3s_server_location != '/var/lib/rancher/k3s' + +- name: Create symlink'd commands (kubectl, crictl) + file: + src: "{{ '/usr/local/bin/k3s.sh' if k3s_server_location != '/var/lib/rancher/k3s' else '/usr/local/bin/k3s' | default('/usr/local/bin/k3s') }}" + dest: "/usr/local/bin/{{ item }}" + state: link + with_items: + - kubectl + - crictl + - name: Replace https://localhost:6443 by https://master-ip:6443 command: >- - k3s kubectl config set-cluster default + /usr/local/bin/kubectl config set-cluster default --server=https://{{ master_ip }}:6443 --kubeconfig ~{{ ansible_user }}/.kube/config changed_when: true -- name: Create kubectl symlink - file: - src: /usr/local/bin/k3s - dest: /usr/local/bin/kubectl - state: link - -- name: Create crictl symlink - file: - src: /usr/local/bin/k3s - dest: /usr/local/bin/crictl - state: link diff --git a/roles/k3s/master/templates/k3s.sh.j2 b/roles/k3s/master/templates/k3s.sh.j2 new file mode 100755 index 000000000..09dce3e8c --- /dev/null +++ b/roles/k3s/master/templates/k3s.sh.j2 @@ -0,0 +1,23 @@ +#!/bin/bash + +# k3s.sh is used to supply the --data-dir argument to k3s for symlink'd commands. +# Note: this file is only present when k3s_server_location is defined. 
+ +K3S_BIN="/usr/local/bin/k3s" +DATA_DIR="{{ k3s_server_location }}" + +BASENAME="${0##*/}" + +if [ -z "${DATA_DIR}" ] +then + echo "k3s.sh: DATA_DIR is not set. Something went wrong with the ansible installation." 1>&2 + exit 255 +fi + +if [ "${BASENAME}" = "k3s.sh" ] +then + "${K3S_BIN}" --data-dir "${DATA_DIR}" "${@}" +else + "${K3S_BIN}" --data-dir "${DATA_DIR}" "${BASENAME}" "${@}" +fi + diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index b62f69d6e..888ad9daf 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -184,6 +184,7 @@ - "{{ k3s_server_location if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" - /var/lib/kubelet - /usr/local/bin/k3s + - /usr/local/bin/k3s.sh - /usr/local/bin/k3s-killall.sh - /usr/local/bin/k3s-uninstall.sh From a67e52d9da37a2a19a40a42e2a2f02ede822019f Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 14 Dec 2021 23:32:33 -0500 Subject: [PATCH 012/108] FIX #163: Removed ~/.kube/config on reset Signed-off-by: Jon S. Stumpf --- roles/reset/tasks/main.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 888ad9daf..e376e91d8 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -188,6 +188,12 @@ - /usr/local/bin/k3s-killall.sh - /usr/local/bin/k3s-uninstall.sh +- name: Remove ~{{ ansible_user }}/.kube/config + file: + path: "~{{ ansible_user }}/.kube/config" + state: absent + when: inventory_hostname in groups['master'] + - name: Remove package k3s-selinux yum: name: k3s-selinux From 03b8468710e238f391425c281f01adec510b85ca Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 14 Dec 2021 23:34:22 -0500 Subject: [PATCH 013/108] Fixed name for task Signed-off-by: Jon S. Stumpf --- roles/reset/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index e376e91d8..1481ef8b0 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -104,7 +104,7 @@ # iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore # TODO: Replace with appropriate ansible module # -- name: Get list of KUBE- chains +- name: Remove KUBE and CNI chains from iptables register: iptables_save_restore shell: iptables-save | egrep -v '(KUBE|CNI)-' | iptables-restore From 86bb5156afc1b16cefb6aec029f49e208fbfa57d Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 14 Dec 2021 23:37:12 -0500 Subject: [PATCH 014/108] Replace name: labels with path: Signed-off-by: Jon S. 
Stumpf --- roles/reset/tasks/main.yml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 1481ef8b0..f0b73f86c 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -97,7 +97,7 @@ - name: Remove CNI files file: - name: /var/lib/cni + path: /var/lib/cni state: absent # @@ -135,7 +135,7 @@ # - name: Remove service files file: - name: "{{ systemd_dir }}/{{ item }}" + path: "{{ systemd_dir }}/{{ item }}" state: absent loop: "{{ k3s_services | product(k3s_service_file_extensions) | map('join', '.') }}" @@ -157,7 +157,7 @@ - name: Command files register: stat_command_files stat: - name: "/usr/local/bin/{{ item }}" + path: "/usr/local/bin/{{ item }}" loop: - kubectl - crictl @@ -165,7 +165,7 @@ - name: Remove command files file: - name: "{{ item.stat.path }}" + path: "{{ item.stat.path }}" state: absent when: (item.stat.exists | default(false)) and (item.stat.islnk | default(false)) loop: "{{ stat_command_files.results }}" @@ -175,7 +175,7 @@ # Remove files and directories - name: Remove files, binaries and data file: - name: "{{ item }}" + path: "{{ item }}" state: absent loop: - /etc/rancher/k3s From 244e6a5f87a3314a5a2a1ddcc6c124fc06704fe0 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 13 Dec 2021 21:35:22 -0500 Subject: [PATCH 015/108] Refactored roles/download/tasks/main.yml and implemented feature to use a commit versus a version number Signed-off-by: Jon S. Stumpf --- inventory/.gitignore | 3 +- inventory/sample/group_vars/all.yml | 129 ++++++++++++++++-- .../sample/group_vars/k3s_cluster/all.yml | 24 ++++ roles/download/tasks/main.yml | 59 ++++---- roles/download/vars/main.yml | 5 + 5 files changed, 183 insertions(+), 37 deletions(-) create mode 100644 inventory/sample/group_vars/k3s_cluster/all.yml create mode 100644 roles/download/vars/main.yml diff --git a/inventory/.gitignore b/inventory/.gitignore index 568d6c0db..4435407cb 100644 --- a/inventory/.gitignore +++ b/inventory/.gitignore @@ -1,3 +1,4 @@ * !.gitignore -!sample/ \ No newline at end of file +!sample/ +!sample/k3s_cluster/ diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index c82822334..074055df6 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -1,20 +1,123 @@ --- -k3s_version: v1.22.3+k3s1 + +# This is the SSH user used to configure your hosts ansible_user: debian -master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" -extra_server_args: "" -extra_agent_args: "" -k3s_server_location: /var/lib/rancher/k3s +############################################### +# k3s-install.sh flags, from https://get.k3s.io +# +# - INSTALL_K3S_SKIP_DOWNLOAD +# If set to true will not download k3s hash or binary. +# (NOT YET IMPLEMENTED) +# +#install_k3s_skip_download: false + +# - INSTALL_K3S_FORCE_RESTART +# If set to true will always restart the K3s service +# (NOT YET IMPLEMENTED) +# +#install_k3s_force_restart: false + +# - INSTALL_K3S_SYMLINK +# If set to 'skip' will not create symlinks, 'force' will overwrite, +# default will symlink if command does not exist in path. +# (NOT YET IMPLEMENTED) +# +#install_k3s_symlink: 'skip' + +# - INSTALL_K3S_SKIP_ENABLE +# If set to true will not enable or start k3s service. +# (NOT YET IMPLEMENTED) +# +#install_k3s_skip_enable: false + +# - INSTALL_K3S_SKIP_START +# If set to true will not start k3s service. 
+# (NOT YET IMPLEMENTED) +# +#install_k3s_skip_start: false + +# - INSTALL_K3S_VERSION +# Version of k3s to download from github. Will attempt to download from the +# stable channel if not specified. +# (IMPLEMENTED: only specifying the version; the channels are not implemented.) +# +#install_k3s_version: v1.22.4+k3s1 + +# - INSTALL_K3S_COMMIT +# Commit of k3s to download from temporary cloud storage. +# * (for developer & QA use) +# (IMPLEMENTED: need the full commit #) +# +#install_k3s_commit: + +# - INSTALL_K3S_BIN_DIR +# Directory to install k3s binary, links, and uninstall script to, or use +# /usr/local/bin as the default +# (NOT YET IMPLEMENTED) +# +#install_k3s_bin_dir: '/usr/local/bin' + +# - INSTALL_K3S_BIN_DIR_READ_ONLY +# If set to true will not write files to INSTALL_K3S_BIN_DIR, forces +# setting INSTALL_K3S_SKIP_DOWNLOAD=true +# (NOT YET IMPLEMENTED) +# +#install_k3s_bin_dir_read_only: false + +# - INSTALL_K3S_SYSTEMD_DIR +# Directory to install systemd service and environment files to, or use +# /etc/systemd/system as the default +# (IMPLEMENTED) +# +#install_k3s_systemd_dir: '/etc/systemd/system' + +# - INSTALL_K3S_EXEC +# This is replaced by specific variables for servers and agents. +# These will provide extra arguments to the "k3s" application. +# (IMPLEMENTED) +# +#install_k3s_server_args: '' +#install_k3s_agent_args: '' + +# - INSTALL_K3S_NAME +# Name of systemd service to create, will default from the k3s exec command +# if not specified. If specified the name will be prefixed with 'k3s-'. +# (NOT YET IMPLEMENTED) +# +#install_k3s_name: '' + +# - INSTALL_K3S_TYPE +# Type of systemd service to create, will default from the k3s exec command +# if not specified. +# (NOT YET IMPLEMENTED) +# +#install_k3s_type: '' + +# - INSTALL_K3S_SELINUX_WARN +# If set to true will continue if k3s-selinux policy is not found. +# (NOT YET IMPLEMENTED) +# +#install_k3s_selinux_warn: false -# Services information -systemd_dir: /etc/systemd/system +# - INSTALL_K3S_SKIP_SELINUX_RPM +# If set to true will skip automatic installation of the k3s RPM. +# (NOT YET IMPLEMENTED) +# +#install_k3s_skip_selinux_rpm: false -k3s_services: - - k3s - - k3s-node +# - INSTALL_K3S_CHANNEL_URL +# Channel URL for fetching k3s download URL. +# Defaults to 'https://update.k3s.io/v1-release/channels'. +# Must only be https:// URLs +# (NOT YET IMPLEMENTED) +# +#install_k3s_channel_url: 'https://update.k3s.io/v1-release/channels' -k3s_service_file_extensions: - - service - - service.env +# - INSTALL_K3S_CHANNEL +# Channel to use for fetching k3s download URL. +# Defaults to 'stable'. +# (NOT YET IMPLEMENTED) +# +#install_k3s_channel: 'stable' diff --git a/inventory/sample/group_vars/k3s_cluster/all.yml b/inventory/sample/group_vars/k3s_cluster/all.yml new file mode 100644 index 000000000..fa59c126c --- /dev/null +++ b/inventory/sample/group_vars/k3s_cluster/all.yml @@ -0,0 +1,24 @@ +--- +# These variables are not meant to be changed. +# All changes should happen in "group_vars/all.yml". 
+ +k3s_version: "{{ install_k3s_version | default('v1.22.4+k3s1') }}" + +extra_server_args: "{{ install_k3s_server_args | default('') }}" +extra_agent_args: "{{ install_k3s_agent_args | default('') }}" + +systemd_dir: "{{ install_k3s_systemd_dir | default('/etc/systemd/system') }}" + +k3s_server_location: /var/lib/rancher/k3s + +master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" + +# Services information +k3s_services: + - k3s + - k3s-node + +k3s_service_file_extensions: + - service + - service.env + diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 1450fd86e..1aeb39a0b 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -1,36 +1,49 @@ --- -- name: Download k3s binary x64 - get_url: - url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s - checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-amd64.txt - dest: /usr/local/bin/k3s - owner: root - group: root - mode: 0755 - when: ansible_facts.architecture == "x86_64" +# Determine architecture and suffix +- name: Check for amd64 architecture + set_fact: + k3s_arch: "amd64" + k3s_suffix: "" + when: ansible_facts.architecture == "x86_64" or + ansible_facts.architecture == "amd64" -- name: Download k3s binary arm64 - get_url: - url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-arm64 - checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm64.txt - dest: /usr/local/bin/k3s - owner: root - group: root - mode: 0755 +- name: Check for arm64 architecture + set_fact: + k3s_arch: "arm64" + k3s_suffix: "-arm64" when: - ( ansible_facts.architecture is search("arm") and ansible_facts.userspace_bits == "64" ) or ansible_facts.architecture is search("aarch64") -- name: Download k3s binary armhf +- name: Check for arm architecture + set_fact: + k3s_arch: "arm" + k3s_suffix: "-armhf" + when: + - ansible_facts.architecture is search("arm") + - ansible_facts.userspace_bits == "32" + +# Set binary and hash file URLs +- name: Determine GitHub URLs + set_fact: + binary_url: "{{ github_url }}/download/{{ k3s_version }}/k3s{{ k3s_suffix }}" + hash_url: "{{ github_url }}/download/{{ k3s_version }}/sha256sum-{{ k3s_arch }}.txt" + when: install_k3s_commit is not defined + +- name: Determine Storage URLs + set_fact: + binary_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ install_k3s_commit }}" + hash_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ install_k3s_commit }}.sha256sum" + when: install_k3s_commit is defined + +- name: Download k3s binary get_url: - url: https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/k3s-armhf - checksum: sha256:https://github.com/k3s-io/k3s/releases/download/{{ k3s_version }}/sha256sum-arm.txt + url: "{{ binary_url }}" + checksum: "sha256:{{ hash_url }}" dest: /usr/local/bin/k3s owner: root group: root mode: 0755 - when: - - ansible_facts.architecture is search("arm") - - ansible_facts.userspace_bits == "32" + diff --git a/roles/download/vars/main.yml b/roles/download/vars/main.yml new file mode 100644 index 000000000..7385483d0 --- /dev/null +++ b/roles/download/vars/main.yml @@ -0,0 +1,5 @@ +--- + +github_url: "https://github.com/k3s-io/k3s/releases" +storage_url: "https://storage.googleapis.com/k3s-ci-builds" + From 97830ee692397d441050d52ddf2c882128d44428 Mon Sep 17 00:00:00 2001 From: "Jon S. 
Stumpf" Date: Mon, 13 Dec 2021 23:55:42 -0500 Subject: [PATCH 016/108] Added support to change the bin directory; Fixed same bug as in PR#143; Added ctr to the list of symlinks Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/all.yml | 6 +++--- inventory/sample/group_vars/k3s_cluster/all.yml | 1 + roles/download/tasks/main.yml | 2 +- roles/k3s/master/tasks/main.yml | 11 ++++++----- roles/k3s/master/templates/k3s.service.j2 | 2 +- roles/k3s/node/templates/k3s.service.j2 | 2 +- roles/prereq/tasks/main.yml | 4 ++-- roles/reset/tasks/main.yml | 14 +++++++------- 8 files changed, 22 insertions(+), 20 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 074055df6..71ec0be1f 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -21,9 +21,9 @@ ansible_user: debian # - INSTALL_K3S_SYMLINK # If set to 'skip' will not create symlinks, 'force' will overwrite, # default will symlink if command does not exist in path. -# (NOT YET IMPLEMENTED) +# (NOT YET IMPLEMENTED: behaves as if 'force' is chosen) # -#install_k3s_symlink: 'skip' +#install_k3s_symlink: 'force' # - INSTALL_K3S_SKIP_ENABLE # If set to true will not enable or start k3s service. @@ -54,7 +54,7 @@ ansible_user: debian # - INSTALL_K3S_BIN_DIR # Directory to install k3s binary, links, and uninstall script to, or use # /usr/local/bin as the default -# (NOT YET IMPLEMENTED) +# (IMPLEMENTED) # #install_k3s_bin_dir: '/usr/local/bin' diff --git a/inventory/sample/group_vars/k3s_cluster/all.yml b/inventory/sample/group_vars/k3s_cluster/all.yml index fa59c126c..16f6e9b69 100644 --- a/inventory/sample/group_vars/k3s_cluster/all.yml +++ b/inventory/sample/group_vars/k3s_cluster/all.yml @@ -8,6 +8,7 @@ extra_server_args: "{{ install_k3s_server_args | default('') }}" extra_agent_args: "{{ install_k3s_agent_args | default('') }}" systemd_dir: "{{ install_k3s_systemd_dir | default('/etc/systemd/system') }}" +bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" k3s_server_location: /var/lib/rancher/k3s diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 1aeb39a0b..3be1b0aa0 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -42,7 +42,7 @@ get_url: url: "{{ binary_url }}" checksum: "sha256:{{ hash_url }}" - dest: /usr/local/bin/k3s + dest: "{{ bin_dir }}/k3s" owner: root group: root mode: 0755 diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 0fbd1cb13..90d3cd3b3 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -63,7 +63,7 @@ register: k3s_symlink template: src: "k3s.sh.j2" - dest: "/usr/local/bin/k3s.sh" + dest: "{{ bin_dir }}/k3s.sh" owner: root group: root mode: 0755 @@ -71,18 +71,19 @@ - k3s_server_location is defined - k3s_server_location != '/var/lib/rancher/k3s' -- name: Create symlink'd commands (kubectl, crictl) +- name: Create symlink'd commands (kubectl, crictl, ctr) file: - src: "{{ '/usr/local/bin/k3s.sh' if k3s_server_location != '/var/lib/rancher/k3s' else '/usr/local/bin/k3s' | default('/usr/local/bin/k3s') }}" - dest: "/usr/local/bin/{{ item }}" + src: "{{ '{{ bin_dir }}/k3s.sh' if k3s_server_location != '/var/lib/rancher/k3s' else '{{ bin_dir }}/k3s' | default('{{ bin_dir }}/k3s') }}" + dest: "{{ bin_dir }}/{{ item }}" state: link with_items: - kubectl - crictl + - ctr - name: Replace https://localhost:6443 by https://master-ip:6443 command: >- - /usr/local/bin/kubectl config set-cluster default 
+ {{ bin_dir }}/kubectl config set-cluster default --server=https://{{ master_ip }}:6443 --kubeconfig ~{{ ansible_user }}/.kube/config changed_when: true diff --git a/roles/k3s/master/templates/k3s.service.j2 b/roles/k3s/master/templates/k3s.service.j2 index a56ab1084..71b7bc201 100644 --- a/roles/k3s/master/templates/k3s.service.j2 +++ b/roles/k3s/master/templates/k3s.service.j2 @@ -7,7 +7,7 @@ After=network-online.target Type=notify ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe overlay -ExecStart=/usr/local/bin/k3s server --data-dir {{ k3s_server_location }} {{ extra_server_args | default("") }} +ExecStart={{ bin_dir }}/k3s server --data-dir {{ k3s_server_location }} {{ extra_server_args | default("") }} KillMode=process Delegate=yes # Having non-zero Limit*s causes performance problems due to accounting overhead diff --git a/roles/k3s/node/templates/k3s.service.j2 b/roles/k3s/node/templates/k3s.service.j2 index 99a0ac3d0..fdea28471 100644 --- a/roles/k3s/node/templates/k3s.service.j2 +++ b/roles/k3s/node/templates/k3s.service.j2 @@ -7,7 +7,7 @@ After=network-online.target Type=notify ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe overlay -ExecStart=/usr/local/bin/k3s agent --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['master'][0]]['token'] }} {{ extra_agent_args | default("") }} +ExecStart={{ bin_dir }}/k3s agent --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['master'][0]]['token'] }} {{ extra_agent_args | default("") }} KillMode=process Delegate=yes # Having non-zero Limit*s causes performance problems due to accounting overhead diff --git a/roles/prereq/tasks/main.yml b/roles/prereq/tasks/main.yml index e857729b8..8718db325 100644 --- a/roles/prereq/tasks/main.yml +++ b/roles/prereq/tasks/main.yml @@ -43,9 +43,9 @@ - net.bridge.bridge-nf-call-iptables - net.bridge.bridge-nf-call-ip6tables -- name: Add /usr/local/bin to sudo secure_path +- name: Add {{ bin_dir }} to sudo secure_path lineinfile: - line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin' + line: 'Defaults secure_path = /sbin:/bin:/usr/sbin:/usr/bin:{{ bin_dir }}' regexp: "Defaults(\\s)*secure_path(\\s)*=" state: present insertafter: EOF diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index f0b73f86c..b144ed589 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -149,15 +149,15 @@ # # for cmd in kubectl crictl ctr; do -# if [ -L /usr/local/bin/$cmd ]; then -# rm -f /usr/local/bin/$cmd +# if [ -L {{ bin_dir }}/$cmd ]; then +# rm -f {{ bin_dir }}/$cmd # fi # done # - name: Command files register: stat_command_files stat: - path: "/usr/local/bin/{{ item }}" + path: "{{ bin_dir }}/{{ item }}" loop: - kubectl - crictl @@ -183,10 +183,10 @@ - /run/flannel - "{{ k3s_server_location if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" - /var/lib/kubelet - - /usr/local/bin/k3s - - /usr/local/bin/k3s.sh - - /usr/local/bin/k3s-killall.sh - - /usr/local/bin/k3s-uninstall.sh + - "{{ bin_dir }}/k3s" + - "{{ bin_dir }}/k3s.sh" + - "{{ bin_dir }}/k3s-killall.sh" + - "{{ bin_dir }}/k3s-uninstall.sh" - name: Remove ~{{ ansible_user }}/.kube/config file: From a187676dba4ee798d24ee4419b80214fa22e5952 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 14 Dec 2021 00:24:53 -0500 Subject: [PATCH 017/108] Implemented install_k3s_symlink and install_k3s_bin_dir_read_only Signed-off-by: Jon S. 
Stumpf --- inventory/sample/group_vars/all.yml | 7 +++++-- inventory/sample/group_vars/k3s_cluster/all.yml | 3 +++ roles/k3s/master/tasks/main.yml | 3 +++ 3 files changed, 11 insertions(+), 2 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 71ec0be1f..dbe4ccda8 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -21,7 +21,10 @@ ansible_user: debian # - INSTALL_K3S_SYMLINK # If set to 'skip' will not create symlinks, 'force' will overwrite, # default will symlink if command does not exist in path. -# (NOT YET IMPLEMENTED: behaves as if 'force' is chosen) +# (Partially IMPLEMENTED: +# - 'force' and 'skip' are implemented +# - 'force' is the default behavior +# - It does not check if the command already exists in the path) # #install_k3s_symlink: 'force' @@ -61,7 +64,7 @@ ansible_user: debian # - INSTALL_K3S_BIN_DIR_READ_ONLY # If set to true will not write files to INSTALL_K3S_BIN_DIR, forces # setting INSTALL_K3S_SKIP_DOWNLOAD=true -# (NOT YET IMPLEMENTED) +# (IMPLEMENTED) # #install_k3s_bin_dir_read_only: false diff --git a/inventory/sample/group_vars/k3s_cluster/all.yml b/inventory/sample/group_vars/k3s_cluster/all.yml index 16f6e9b69..edba604f5 100644 --- a/inventory/sample/group_vars/k3s_cluster/all.yml +++ b/inventory/sample/group_vars/k3s_cluster/all.yml @@ -12,6 +12,9 @@ bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" k3s_server_location: /var/lib/rancher/k3s +symlink: "{{ install_k3s_symlink | default('force') }}" +bin_dir_read_only: "{{ install_k3s_bin_dir_read_only | default(false) }}" + master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" # Services information diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 90d3cd3b3..3e373f2b8 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -80,6 +80,9 @@ - kubectl - crictl - ctr + when: + - not bin_dir_read_only + - not (symlink == 'skip') - name: Replace https://localhost:6443 by https://master-ip:6443 command: >- From 90efdd0055f32b8e31f71148c5417ad27bd68044 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 15 Dec 2021 17:47:43 -0500 Subject: [PATCH 018/108] Renamed k3s_server_location to data_dir to be consistent; Added install_k3s_data_dir option in group_vars/all.yml Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/all.yml | 20 +++++++++++++++---- .../sample/group_vars/k3s_cluster/all.yml | 3 +-- roles/k3s/master/tasks/main.yml | 20 +++++++++++-------- roles/k3s/master/templates/k3s.service.j2 | 2 +- roles/k3s/master/templates/k3s.sh.j2 | 4 ++-- roles/reset/tasks/main.yml | 8 ++++---- 6 files changed, 36 insertions(+), 21 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index dbe4ccda8..fc096381a 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -1,11 +1,12 @@ --- -# This is the SSH user used to configure your hosts +# This is the SSH user used by ansible to configure your hosts ansible_user: debian -############################################### -# k3s-install.sh flags, from https://get.k3s.io -# +######################################################## +# Begin: k3s-install.sh flags, from https://get.k3s.io # +######################################################## + # - INSTALL_K3S_SKIP_DOWNLOAD # If set to true will not download k3s hash or binary. 
# (NOT YET IMPLEMENTED) @@ -124,3 +125,14 @@ ansible_user: debian # #install_k3s_channel: 'stable' +######################################################## +# End: k3s-install.sh flags, from https://get.k3s.io # +######################################################## + +# - INSTALL_K3S_DATA_DIR +# Change the data directory for the k3s server. Defaults to '/var/lib/rancher/k3s'. +# TODO: submit PR to k3s-io/k3s +# (IMPLEMENTED) +# +#install_k3s_data_dir: '/var/lib/rancher/k3s' + diff --git a/inventory/sample/group_vars/k3s_cluster/all.yml b/inventory/sample/group_vars/k3s_cluster/all.yml index edba604f5..2bb4d0937 100644 --- a/inventory/sample/group_vars/k3s_cluster/all.yml +++ b/inventory/sample/group_vars/k3s_cluster/all.yml @@ -9,8 +9,7 @@ extra_agent_args: "{{ install_k3s_agent_args | default('') }}" systemd_dir: "{{ install_k3s_systemd_dir | default('/etc/systemd/system') }}" bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" - -k3s_server_location: /var/lib/rancher/k3s +data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" symlink: "{{ install_k3s_symlink | default('force') }}" bin_dir_read_only: "{{ install_k3s_bin_dir_read_only | default(false) }}" diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 3e373f2b8..c2451f401 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -18,21 +18,21 @@ - name: Wait for node-token wait_for: - path: "{{ k3s_server_location }}/server/node-token" + path: "{{ data_dir }}/server/node-token" - name: Register node-token file access mode stat: - path: "{{ k3s_server_location }}/server/node-token" + path: "{{ data_dir }}/server/node-token" register: p - name: Change file access node-token file: - path: "{{ k3s_server_location }}/server/node-token" + path: "{{ data_dir }}/server/node-token" mode: "g+rx,o+rx" - name: Read node-token from master slurp: - path: "{{ k3s_server_location }}/server/node-token" + path: "{{ data_dir }}/server/node-token" register: node_token - name: Store Master node-token @@ -41,7 +41,7 @@ - name: Restore node-token file access file: - path: "{{ k3s_server_location }}/server/node-token" + path: "{{ data_dir }}/server/node-token" mode: "{{ p.stat.mode }}" - name: Create directory .kube @@ -68,12 +68,16 @@ group: root mode: 0755 when: - - k3s_server_location is defined - - k3s_server_location != '/var/lib/rancher/k3s' + - data_dir is defined + - data_dir != '/var/lib/rancher/k3s' + +- name: Determine k3s bin target + set_fact: + k3s_bin_target: "{{ '{{ bin_dir }}/k3s.sh' if data_dir != '/var/lib/rancher/k3s' else '{{ bin_dir }}/k3s' | default('{{ bin_dir }}/k3s') }}" - name: Create symlink'd commands (kubectl, crictl, ctr) file: - src: "{{ '{{ bin_dir }}/k3s.sh' if k3s_server_location != '/var/lib/rancher/k3s' else '{{ bin_dir }}/k3s' | default('{{ bin_dir }}/k3s') }}" + src: "{{ k3s_bin_target }}" dest: "{{ bin_dir }}/{{ item }}" state: link with_items: diff --git a/roles/k3s/master/templates/k3s.service.j2 b/roles/k3s/master/templates/k3s.service.j2 index 71b7bc201..346b927a1 100644 --- a/roles/k3s/master/templates/k3s.service.j2 +++ b/roles/k3s/master/templates/k3s.service.j2 @@ -7,7 +7,7 @@ After=network-online.target Type=notify ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe overlay -ExecStart={{ bin_dir }}/k3s server --data-dir {{ k3s_server_location }} {{ extra_server_args | default("") }} +ExecStart={{ bin_dir }}/k3s server --data-dir "{{ data_dir }}" {{ extra_server_args | default("") }} 
KillMode=process Delegate=yes # Having non-zero Limit*s causes performance problems due to accounting overhead diff --git a/roles/k3s/master/templates/k3s.sh.j2 b/roles/k3s/master/templates/k3s.sh.j2 index 09dce3e8c..6d46a00f8 100755 --- a/roles/k3s/master/templates/k3s.sh.j2 +++ b/roles/k3s/master/templates/k3s.sh.j2 @@ -1,10 +1,10 @@ #!/bin/bash # k3s.sh is used to supply the --data-dir argument to k3s for symlink'd commands. -# Note: this file is only present when k3s_server_location is defined. +# Note: this file is only present when data_dir is defined. K3S_BIN="/usr/local/bin/k3s" -DATA_DIR="{{ k3s_server_location }}" +DATA_DIR="{{ data_dir }}" BASENAME="${0##*/}" diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index b144ed589..ce6e0a63c 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -34,9 +34,9 @@ # # killtree $({ set +x; } 2>/dev/null; getshims; set -x) # -- name: pkill -9 -f "{{ k3s_server_location }}/data/[^/]+/bin/containerd-shim" +- name: pkill -9 -f "{{ data_dir }}/data/[^/]+/bin/containerd-shim" register: pkill_containerd_shim - command: pkill -9 -f "{{ k3s_server_location }}/data/[^/]+/bin/containerd-shim" + command: pkill -9 -f "{{ data_dir }}/data/[^/]+/bin/containerd-shim" when: inventory_hostname in groups['master'] changed_when: "pkill_containerd_shim.rc == 0" failed_when: false @@ -49,7 +49,7 @@ include_tasks: umount_with_children.yml loop: - /run/k3s - - "{{ k3s_server_location if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" + - "{{ data_dir if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" - /var/lib/kubelet/pods - /var/lib/kubelet/plugins - /run/netns/cni- @@ -181,7 +181,7 @@ - /etc/rancher/k3s - /run/k3s - /run/flannel - - "{{ k3s_server_location if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" + - "{{ data_dir if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" - /var/lib/kubelet - "{{ bin_dir }}/k3s" - "{{ bin_dir }}/k3s.sh" From 600a37536538ce950b7d714fe5fac449dd6b22a5 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 15 Dec 2021 19:18:12 -0500 Subject: [PATCH 019/108] Reorder roles/k3s/master/tasks/main.yml and add comments Signed-off-by: Jon S. 
Stumpf --- roles/k3s/master/tasks/main.yml | 44 ++++++++++++++++++++++----------- 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index c2451f401..1c06a325b 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -1,5 +1,9 @@ --- +################################################################################ +# Setup k3s service +# + - name: Copy K3s service file register: k3s_service template: @@ -16,6 +20,10 @@ state: restarted enabled: yes +################################################################################ +# node-token tasks +# + - name: Wait for node-token wait_for: path: "{{ data_dir }}/server/node-token" @@ -44,20 +52,9 @@ path: "{{ data_dir }}/server/node-token" mode: "{{ p.stat.mode }}" -- name: Create directory .kube - file: - path: ~{{ ansible_user }}/.kube - state: directory - owner: "{{ ansible_user }}" - mode: "u=rwx,g=rx,o=" - -- name: Copy config file to user home directory - copy: - src: /etc/rancher/k3s/k3s.yaml - dest: ~{{ ansible_user }}/.kube/config - remote_src: yes - owner: "{{ ansible_user }}" - mode: "u=rw,g=,o=" +################################################################################ +# Create ctl commands +# - name: Copy k3s.sh for symlink'd commands register: k3s_symlink @@ -88,6 +85,25 @@ - not bin_dir_read_only - not (symlink == 'skip') +################################################################################ +# Setup {{ ansible_user }}/.kube/config +# + +- name: Create directory .kube + file: + path: ~{{ ansible_user }}/.kube + state: directory + owner: "{{ ansible_user }}" + mode: "u=rwx,g=rx,o=" + +- name: Copy config file to user home directory + copy: + src: /etc/rancher/k3s/k3s.yaml + dest: ~{{ ansible_user }}/.kube/config + remote_src: yes + owner: "{{ ansible_user }}" + mode: "u=rw,g=,o=" + - name: Replace https://localhost:6443 by https://master-ip:6443 command: >- {{ bin_dir }}/kubectl config set-cluster default From 33c71de68eb6cfbe2706a6a756a6047586bdd3cd Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 15 Dec 2021 20:15:31 -0500 Subject: [PATCH 020/108] Made data_dir apply to all cluster hosts, not just master hosts Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/all.yml | 2 +- roles/k3s/node/templates/k3s.service.j2 | 2 +- roles/reset/tasks/main.yml | 16 ++-------------- 3 files changed, 4 insertions(+), 16 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index fc096381a..61477d084 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -130,7 +130,7 @@ ansible_user: debian ######################################################## # - INSTALL_K3S_DATA_DIR -# Change the data directory for the k3s server. Defaults to '/var/lib/rancher/k3s'. +# Change the data directory for the k3s service. Defaults to '/var/lib/rancher/k3s'. 
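The node-token section grouped above boils down to one step: read `{{ data_dir }}/server/node-token` on the first server and expose it as a fact the agents join with. A minimal sketch of that core step (assuming the task runs with elevated privileges, in which case the temporary mode change around the read is not strictly required):

```yaml
- name: Read node-token from the first server
  become: true
  ansible.builtin.slurp:
    path: "{{ data_dir }}/server/node-token"
  register: node_token

- name: Expose the token as a fact for the agent play to consume
  ansible.builtin.set_fact:
    token: "{{ node_token.content | b64decode | trim }}"
```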
# TODO: submit PR to k3s-io/k3s # (IMPLEMENTED) # diff --git a/roles/k3s/node/templates/k3s.service.j2 b/roles/k3s/node/templates/k3s.service.j2 index fdea28471..4c87b1e84 100644 --- a/roles/k3s/node/templates/k3s.service.j2 +++ b/roles/k3s/node/templates/k3s.service.j2 @@ -7,7 +7,7 @@ After=network-online.target Type=notify ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe overlay -ExecStart={{ bin_dir }}/k3s agent --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['master'][0]]['token'] }} {{ extra_agent_args | default("") }} +ExecStart={{ bin_dir }}/k3s agent --data-dir "{{ data_dir }}" --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['master'][0]]['token'] }} {{ extra_agent_args | default("") }} KillMode=process Delegate=yes # Having non-zero Limit*s causes performance problems due to accounting overhead diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index ce6e0a63c..21d5e3696 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -20,24 +20,12 @@ failed_when: false loop: "{{ k3s_services }}" -# -# killtree $({ set +x; } 2>/dev/null; getshims; set -x) -# TODO: Why is this different from k3s servers? -# -- name: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim" - register: pkill_containerd_shim - command: pkill -9 -f "k3s/data/[^/]+/bin/containerd-shim" - when: inventory_hostname not in groups['master'] - changed_when: "pkill_containerd_shim.rc == 0" - failed_when: false - # # killtree $({ set +x; } 2>/dev/null; getshims; set -x) # - name: pkill -9 -f "{{ data_dir }}/data/[^/]+/bin/containerd-shim" register: pkill_containerd_shim command: pkill -9 -f "{{ data_dir }}/data/[^/]+/bin/containerd-shim" - when: inventory_hostname in groups['master'] changed_when: "pkill_containerd_shim.rc == 0" failed_when: false @@ -49,7 +37,7 @@ include_tasks: umount_with_children.yml loop: - /run/k3s - - "{{ data_dir if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" + - "{{ data_dir }}" - /var/lib/kubelet/pods - /var/lib/kubelet/plugins - /run/netns/cni- @@ -181,7 +169,7 @@ - /etc/rancher/k3s - /run/k3s - /run/flannel - - "{{ data_dir if inventory_hostname in groups['master'] else '/var/lib/rancher/k3s' | default('/var/lib/rancher/k3s') }}" + - "{{ data_dir }}" - /var/lib/kubelet - "{{ bin_dir }}/k3s" - "{{ bin_dir }}/k3s.sh" From d28c72ae71b674a606992f6700a6746f48524c09 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 15 Dec 2021 22:15:00 -0500 Subject: [PATCH 021/108] Not implementing INSTALL_K3S_SKIP_DOWNLOAD, INSTALL_K3S_FORCE_RESTART, INSTALL_K3S_SKIP_ENABLE, INSTALL_K3S_SKIP_START Signed-off-by: Jon S. Stumpf Squash me Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/all.yml | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 61477d084..7fcceaaad 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -9,15 +9,11 @@ ansible_user: debian # - INSTALL_K3S_SKIP_DOWNLOAD # If set to true will not download k3s hash or binary. -# (NOT YET IMPLEMENTED) -# -#install_k3s_skip_download: false +# (WILL NOT BE IMPLEMENTED) # - INSTALL_K3S_FORCE_RESTART -# If set to true will always restart the K3s service -# (NOT YET IMPLEMENTED) -# -#install_k3s_force_restart: false +# k3s-ansible always restarts the service. 
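The agent unit updated above pulls together three pieces of derived state: the binary and data locations, the server endpoint, and the token gathered on the first master. A small illustrative pre-flight check for those inputs (a hypothetical task, not part of this patch series; the group and variable names are the ones in use at this point in the series):

```yaml
- name: Sanity-check the values the agent service template consumes
  ansible.builtin.assert:
    that:
      - bin_dir is defined
      - data_dir is defined
      - master_ip is defined
      - hostvars[groups['master'][0]]['token'] is defined
    fail_msg: "Agent join parameters are incomplete; did the server play run first?"
```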
+# (WILL NOT BE IMPLEMENTED) # - INSTALL_K3S_SYMLINK # If set to 'skip' will not create symlinks, 'force' will overwrite, @@ -30,16 +26,12 @@ ansible_user: debian #install_k3s_symlink: 'force' # - INSTALL_K3S_SKIP_ENABLE -# If set to true will not enable or start k3s service. -# (NOT YET IMPLEMENTED) -# -#install_k3s_skip_enable: false +# k3s-ansble always enables the service. +# (WILL NOT BE IMPLEMENTED) # - INSTALL_K3S_SKIP_START -# If set to true will not start k3s service. -# (NOT YET IMPLEMENTED) -# -#install_k3s_skip_start: false +# k3s-ansible always starts the service. +# (WILL NOT BE IMPLEMENTED) # - INSTALL_K3S_VERSION # Version of k3s to download from github. Will attempt to download from the From 3b8842f4a7dfb0b1137aef170ac016a9ca1fefb1 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sat, 18 Dec 2021 20:58:29 -0500 Subject: [PATCH 022/108] Added capability to download the version from a channel (e.g., 'stable') Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/all.yml | 8 +++--- .../sample/group_vars/k3s_cluster/all.yml | 5 +++- roles/download/tasks/main.yml | 28 +++++++++++++++++++ 3 files changed, 36 insertions(+), 5 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 7fcceaaad..3d69f4cd3 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -36,9 +36,9 @@ ansible_user: debian # - INSTALL_K3S_VERSION # Version of k3s to download from github. Will attempt to download from the # stable channel if not specified. -# (IMPLEMENTED: only specifying the version; the channels are not implemented.) +# (IMPLEMENTED) # -#install_k3s_version: v1.22.4+k3s1 +#install_k3s_version: v1.22.5+k3s1 # - INSTALL_K3S_COMMIT # Commit of k3s to download from temporary cloud storage. @@ -106,7 +106,7 @@ ansible_user: debian # Channel URL for fetching k3s download URL. # Defaults to 'https://update.k3s.io/v1-release/channels'. # Must only be https:// URLs -# (NOT YET IMPLEMENTED) +# (IMPLEMENTED) # #install_k3s_channel_url: 'https://update.k3s.io/v1-release/channels' @@ -115,7 +115,7 @@ ansible_user: debian # Defaults to 'stable'. # (NOT YET IMPLEMENTED) # -#install_k3s_channel: 'stable' +#install_k3s_channel: 'latest' ######################################################## # End: k3s-install.sh flags, from https://get.k3s.io # diff --git a/inventory/sample/group_vars/k3s_cluster/all.yml b/inventory/sample/group_vars/k3s_cluster/all.yml index 2bb4d0937..7f9510bd3 100644 --- a/inventory/sample/group_vars/k3s_cluster/all.yml +++ b/inventory/sample/group_vars/k3s_cluster/all.yml @@ -2,7 +2,10 @@ # These variables are not meant to be changed. # All changes should happen in "group_vars/all.yml". 
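For reference, the channel support added by this patch resolves a version by following an HTTP redirect: `{{ k3s_channel_url }}/{{ k3s_channel }}` (for example `https://update.k3s.io/v1-release/channels/stable`) redirects to the release page of the current tag, so the last path segment of the final URL is the version; the download task below does this with curl. A self-contained sketch of the same lookup, shown as a standalone playbook purely for illustration (the role itself feeds the result into `k3s_version`):

```yaml
- hosts: localhost
  gather_facts: false
  vars:
    k3s_channel_url: https://update.k3s.io/v1-release/channels
    k3s_channel: stable
  tasks:
    - name: Follow the channel redirect without downloading anything
      ansible.builtin.uri:
        url: "{{ k3s_channel_url }}/{{ k3s_channel }}"
        follow_redirects: safe
        return_content: false
      register: channel_version_info

    - name: The version tag is the last path component of the redirected URL
      ansible.builtin.debug:
        msg: "resolved k3s_version: {{ channel_version_info.url.split('/')[-1] }}"
```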
-k3s_version: "{{ install_k3s_version | default('v1.22.4+k3s1') }}" +k3s_version: "{{ install_k3s_version | default('undefined') }}" + +k3s_channel_url: "{{ install_k3s_channel_url | default('https://update.k3s.io/v1-release/channels') }}" +k3s_channel: "{{ install_k3s_channel | default('stable') }}" extra_server_args: "{{ install_k3s_server_args | default('') }}" extra_agent_args: "{{ install_k3s_agent_args | default('') }}" diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 3be1b0aa0..2c1f22ec3 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -25,6 +25,34 @@ - ansible_facts.architecture is search("arm") - ansible_facts.userspace_bits == "32" +# Determine version to download +- name: Determine version from channel + vars: + version_url: "{{ k3s_channel_url }}/{{ k3s_channel }}" + shell: "curl -w '%{url_effective}' -L -s -S {{ version_url }} -o /dev/null | sed -e 's|.*/||'" + register: curl_output + when: k3s_version == 'undefined' + run_once: true + +- name: Output channel information + debug: + msg: + - "k3s_channel_url: {{ k3s_channel_url }}" + - "k3s_channel: {{ k3s_channel }}" + when: k3s_version == 'undefined' + run_once: true + +- name: Set version from channel + set_fact: + k3s_version: "{{ curl_output.stdout_lines[0] }}" + when: k3s_version == 'undefined' + run_once: true + +- name: Output version + debug: + msg: "k3s_version: {{ k3s_version }}" + run_once: true + # Set binary and hash file URLs - name: Determine GitHub URLs set_fact: From 588c17e9178842c729a4ca30aa9af7a82151f8eb Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sat, 18 Dec 2021 22:11:32 -0500 Subject: [PATCH 023/108] Changed option install_k3s_symlink to 'skip' Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 3d69f4cd3..fb9e3a5b4 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -23,7 +23,7 @@ ansible_user: debian # - 'force' is the default behavior # - It does not check if the command already exists in the path) # -#install_k3s_symlink: 'force' +#install_k3s_symlink: 'skip' # - INSTALL_K3S_SKIP_ENABLE # k3s-ansble always enables the service. From 91a189e5e026ae60f83c1ab603c563ff7ecaf5ae Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sat, 18 Dec 2021 22:52:52 -0500 Subject: [PATCH 024/108] Moved site.yml and reset.yml to new playbook directory Signed-off-by: Jon S. 
Stumpf --- README.md | 2 +- inventory/.gitignore | 1 - .../group_vars/k3s_cluster => playbook/group_vars}/all.yml | 2 +- reset.yml => playbook/reset.yml | 0 site.yml => playbook/site.yml | 0 5 files changed, 2 insertions(+), 3 deletions(-) rename {inventory/sample/group_vars/k3s_cluster => playbook/group_vars}/all.yml (93%) rename reset.yml => playbook/reset.yml (100%) rename site.yml => playbook/site.yml (100%) diff --git a/README.md b/README.md index 8e3c855ce..5fc3d092a 100644 --- a/README.md +++ b/README.md @@ -48,7 +48,7 @@ If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match Start provisioning of the cluster using the following command: ```bash -ansible-playbook site.yml -i inventory/my-cluster/hosts.ini +ansible-playbook playbook/site.yml -i inventory/my-cluster/hosts.ini ``` ## Kubeconfig diff --git a/inventory/.gitignore b/inventory/.gitignore index 4435407cb..5ae5024ee 100644 --- a/inventory/.gitignore +++ b/inventory/.gitignore @@ -1,4 +1,3 @@ * !.gitignore !sample/ -!sample/k3s_cluster/ diff --git a/inventory/sample/group_vars/k3s_cluster/all.yml b/playbook/group_vars/all.yml similarity index 93% rename from inventory/sample/group_vars/k3s_cluster/all.yml rename to playbook/group_vars/all.yml index 7f9510bd3..d059d7626 100644 --- a/inventory/sample/group_vars/k3s_cluster/all.yml +++ b/playbook/group_vars/all.yml @@ -1,6 +1,6 @@ --- # These variables are not meant to be changed. -# All changes should happen in "group_vars/all.yml". +# All changes should happen in "inventory/x/group_vars/all.yml". k3s_version: "{{ install_k3s_version | default('undefined') }}" diff --git a/reset.yml b/playbook/reset.yml similarity index 100% rename from reset.yml rename to playbook/reset.yml diff --git a/site.yml b/playbook/site.yml similarity index 100% rename from site.yml rename to playbook/site.yml From 7b7644248e717c2aef7f697669b449fa5e279243 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 18:05:18 -0500 Subject: [PATCH 025/108] Added k3s_commit so that install_k3s_commit is only used in playbook/group_vars/all.yml (like all other install variables) Signed-off-by: Jon S. Stumpf --- playbook/group_vars/all.yml | 1 + roles/download/tasks/main.yml | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index d059d7626..6da10c88d 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -3,6 +3,7 @@ # All changes should happen in "inventory/x/group_vars/all.yml". 
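The split formalized here means a user only edits the inventory side; `playbook/group_vars/all.yml` turns each `install_k3s_*` flag into the internal variable the roles consume, using the literal string `'undefined'` as a sentinel for flags that were left unset. An illustrative inventory override (the values are examples, not defaults of this repository):

```yaml
# inventory/my-cluster/group_vars/all.yml
ansible_user: debian
install_k3s_version: v1.22.5+k3s1        # pin a release explicitly...
# install_k3s_commit: <full commit sha>  # ...or point at a CI build instead (developer/QA use)
install_k3s_data_dir: /data/k3s          # optional: relocate the k3s data directory
```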
k3s_version: "{{ install_k3s_version | default('undefined') }}" +k3s_commit: "{{ install_k3s_commit | default('undefined') }}" k3s_channel_url: "{{ install_k3s_channel_url | default('https://update.k3s.io/v1-release/channels') }}" k3s_channel: "{{ install_k3s_channel | default('stable') }}" diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 2c1f22ec3..31b88054b 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -58,13 +58,13 @@ set_fact: binary_url: "{{ github_url }}/download/{{ k3s_version }}/k3s{{ k3s_suffix }}" hash_url: "{{ github_url }}/download/{{ k3s_version }}/sha256sum-{{ k3s_arch }}.txt" - when: install_k3s_commit is not defined + when: k3s_commit == 'undefined' - name: Determine Storage URLs set_fact: binary_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ install_k3s_commit }}" hash_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ install_k3s_commit }}.sha256sum" - when: install_k3s_commit is defined + when: k3s_commit != 'undefined' - name: Download k3s binary get_url: From 963097534d4d71904c1d9a6804ea3a9beb00a1a8 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 18:53:18 -0500 Subject: [PATCH 026/108] Updated roles/download/tasks/main.yml to use blocks, for clarity Signed-off-by: Jon S. Stumpf --- roles/download/tasks/main.yml | 73 ++++++++++++++++++++--------------- roles/download/vars/main.yml | 2 + 2 files changed, 44 insertions(+), 31 deletions(-) diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 31b88054b..5412d9d1f 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -25,47 +25,58 @@ - ansible_facts.architecture is search("arm") - ansible_facts.userspace_bits == "32" -# Determine version to download -- name: Determine version from channel - vars: - version_url: "{{ k3s_channel_url }}/{{ k3s_channel }}" - shell: "curl -w '%{url_effective}' -L -s -S {{ version_url }} -o /dev/null | sed -e 's|.*/||'" - register: curl_output - when: k3s_version == 'undefined' - run_once: true +- name: Determine version to download + block: + - name: Determine version from channel + vars: + version_url: "{{ k3s_channel_url }}/{{ k3s_channel }}" + block: + - name: Get version from channel + shell: "curl -w '%{url_effective}' -L -s -S {{ version_url }} -o /dev/null | sed -e 's|.*/||'" + register: curl_output -- name: Output channel information - debug: - msg: - - "k3s_channel_url: {{ k3s_channel_url }}" - - "k3s_channel: {{ k3s_channel }}" - when: k3s_version == 'undefined' - run_once: true + - name: Output channel information + debug: + var: version_url + when: report_download_urls -- name: Set version from channel - set_fact: - k3s_version: "{{ curl_output.stdout_lines[0] }}" - when: k3s_version == 'undefined' - run_once: true + - name: Set version from channel + set_fact: + k3s_version: "{{ curl_output.stdout_lines[0] }}" -- name: Output version - debug: - msg: "k3s_version: {{ k3s_version }}" - run_once: true + when: k3s_version == 'undefined' + run_once: true + + - name: Output version + debug: + var: k3s_version + when: report_download_urls + run_once: true + + - name: Determine GitHub URLs + set_fact: + binary_url: "{{ github_url }}/download/{{ k3s_version }}/k3s{{ k3s_suffix }}" + hash_url: "{{ github_url }}/download/{{ k3s_version }}/sha256sum-{{ k3s_arch }}.txt" -# Set binary and hash file URLs -- name: Determine GitHub URLs - set_fact: - binary_url: "{{ github_url }}/download/{{ k3s_version }}/k3s{{ k3s_suffix }}" - hash_url: "{{ github_url 
}}/download/{{ k3s_version }}/sha256sum-{{ k3s_arch }}.txt" when: k3s_commit == 'undefined' - name: Determine Storage URLs set_fact: - binary_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ install_k3s_commit }}" - hash_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ install_k3s_commit }}.sha256sum" + binary_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ k3s_commit }}" + hash_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ k3s_commit }}.sha256sum" when: k3s_commit != 'undefined' +- name: Output URLs + block: + - name: Binary URL + debug: + var: binary_url + + - name: Hash URL + debug: + var: hash_url + when: report_download_urls + - name: Download k3s binary get_url: url: "{{ binary_url }}" diff --git a/roles/download/vars/main.yml b/roles/download/vars/main.yml index 7385483d0..ae0c7022e 100644 --- a/roles/download/vars/main.yml +++ b/roles/download/vars/main.yml @@ -3,3 +3,5 @@ github_url: "https://github.com/k3s-io/k3s/releases" storage_url: "https://storage.googleapis.com/k3s-ci-builds" +report_download_urls: false + From 6915b5f259e19aea64398d119f0b1f037fe71308 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 19:14:39 -0500 Subject: [PATCH 027/108] Added capability to copy the master kubeconfig to the localhost in playbook/cluster.conf Signed-off-by: Jon S. Stumpf --- README.md | 4 ++-- playbook/.gitignore | 1 + playbook/group_vars/all.yml | 3 +++ roles/k3s/master/tasks/main.yml | 8 ++++++++ 4 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 playbook/.gitignore diff --git a/README.md b/README.md index 5fc3d092a..e4cf087fc 100644 --- a/README.md +++ b/README.md @@ -53,8 +53,8 @@ ansible-playbook playbook/site.yml -i inventory/my-cluster/hosts.ini ## Kubeconfig -To get access to your **Kubernetes** cluster just +To get access to your new **Kubernetes** cluster, just use the generated kube config. ```bash -scp debian@master_ip:~/.kube/config ~/.kube/config +kubectl --kubeconfig playbook/cluster.conf ... ``` diff --git a/playbook/.gitignore b/playbook/.gitignore new file mode 100644 index 000000000..e2aad310e --- /dev/null +++ b/playbook/.gitignore @@ -0,0 +1 @@ +cluster.conf diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index 6da10c88d..22b8c3baa 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -18,6 +18,9 @@ data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" symlink: "{{ install_k3s_symlink | default('force') }}" bin_dir_read_only: "{{ install_k3s_bin_dir_read_only | default(false) }}" +# The location of where to capture the new cluster information +cluster_config: cluster.conf + master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" # Services information diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 1c06a325b..ab7c6eacb 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -111,3 +111,11 @@ --kubeconfig ~{{ ansible_user }}/.kube/config changed_when: true +# Fetch a copy of the cluster config for use in one's ~/.kube/config. +- name: Copy .kube/config for new cluster + fetch: + src: "~{{ ansible_user }}/.kube/config" + dest: "{{ cluster_config }}" + flat: yes + run_once: true + From 355e3d3c97be1b2e9e02207297686e6a1f469fb9 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 19:22:06 -0500 Subject: [PATCH 028/108] Fixed some comments Signed-off-by: Jon S. 
Stumpf --- inventory/sample/group_vars/all.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index fb9e3a5b4..eaf089b97 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -1,6 +1,6 @@ --- -# This is the SSH user used by ansible to configure your hosts +# This is the user that has SSH password-less access to configure your hosts ansible_user: debian ######################################################## @@ -117,9 +117,9 @@ ansible_user: debian # #install_k3s_channel: 'latest' -######################################################## +###################################################### # End: k3s-install.sh flags, from https://get.k3s.io # -######################################################## +###################################################### # - INSTALL_K3S_DATA_DIR # Change the data directory for the k3s service. Defaults to '/var/lib/rancher/k3s'. From f961f76eaf7d31b429bc03fa5fa7ba93120e2c81 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 20:36:05 -0500 Subject: [PATCH 029/108] Removed install_bin_dir_read_only flag Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/all.yml | 7 ++----- playbook/group_vars/all.yml | 1 - roles/k3s/master/tasks/main.yml | 4 +--- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index eaf089b97..460ecac8b 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -55,11 +55,8 @@ ansible_user: debian #install_k3s_bin_dir: '/usr/local/bin' # - INSTALL_K3S_BIN_DIR_READ_ONLY -# If set to true will not write files to INSTALL_K3S_BIN_DIR, forces -# setting INSTALL_K3S_SKIP_DOWNLOAD=true -# (IMPLEMENTED) -# -#install_k3s_bin_dir_read_only: false +# k3s-ansible expects k3s_bin_dir to be writable. +# (WILL NOT BE IMPLEMENTED) # - INSTALL_K3S_SYSTEMD_DIR # Directory to install systemd service and environment files to, or use diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index 22b8c3baa..6b0c77cb4 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -16,7 +16,6 @@ bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" symlink: "{{ install_k3s_symlink | default('force') }}" -bin_dir_read_only: "{{ install_k3s_bin_dir_read_only | default(false) }}" # The location of where to capture the new cluster information cluster_config: cluster.conf diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index ab7c6eacb..faff53d09 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -81,9 +81,7 @@ - kubectl - crictl - ctr - when: - - not bin_dir_read_only - - not (symlink == 'skip') + when: not (symlink == 'skip') ################################################################################ # Setup {{ ansible_user }}/.kube/config From dd73c1f2154f7725f678d597f95176305a1a854c Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 20:41:16 -0500 Subject: [PATCH 030/108] Removed install_k3s_symlink flag Signed-off-by: Jon S. 
Stumpf --- inventory/sample/group_vars/all.yml | 7 ++----- playbook/group_vars/all.yml | 2 -- roles/k3s/master/tasks/main.yml | 1 - 3 files changed, 2 insertions(+), 8 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 460ecac8b..cab50054c 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -18,12 +18,9 @@ ansible_user: debian # - INSTALL_K3S_SYMLINK # If set to 'skip' will not create symlinks, 'force' will overwrite, # default will symlink if command does not exist in path. -# (Partially IMPLEMENTED: -# - 'force' and 'skip' are implemented -# - 'force' is the default behavior -# - It does not check if the command already exists in the path) +# (NOT YET IMPLEMENTED) # -#install_k3s_symlink: 'skip' +#install_k3s_symlink: # - INSTALL_K3S_SKIP_ENABLE # k3s-ansble always enables the service. diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index 6b0c77cb4..babe107b2 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -15,8 +15,6 @@ systemd_dir: "{{ install_k3s_systemd_dir | default('/etc/systemd/system') }}" bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" -symlink: "{{ install_k3s_symlink | default('force') }}" - # The location of where to capture the new cluster information cluster_config: cluster.conf diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index faff53d09..363f56cf5 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -81,7 +81,6 @@ - kubectl - crictl - ctr - when: not (symlink == 'skip') ################################################################################ # Setup {{ ansible_user }}/.kube/config From ca445efd4eb72eda724cdceaae4992dfa7c3d78e Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 20:55:28 -0500 Subject: [PATCH 031/108] In inventory/sample/group_vars, simplified all.yml and added README.md to explain the options Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/README.md | 78 +++++++++++++++++ inventory/sample/group_vars/all.yml | 119 ++------------------------ 2 files changed, 87 insertions(+), 110 deletions(-) create mode 100644 inventory/sample/group_vars/README.md diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md new file mode 100644 index 000000000..329c75892 --- /dev/null +++ b/inventory/sample/group_vars/README.md @@ -0,0 +1,78 @@ + +## Introduction + +`inventory/x/group_vars/all.yml` is meant to be modified appropriately for your environment. +If you are familiar with installing **k3s** from [https://get.k3s.io/](https://get.k3s.io/), +some install flags have been implemented here. + +ansible variables that were previously here have moved to `playbook/group_vars/all.yml`. +Those variables are used within the playbooks and roles are not meant to be changed by a user of **k3s-ansible**. +When adding a new _install_ variable, a corresponding variable is added to `playbook/group_vars/all.yml` +which is then used throughout **k3s-ansible**. + +## General flags + +**k3s-ansible** requires SSH password-less access to configure your hosts. +Use **ansible_user** to specify the username with this access. + + +### Flags that control the version of k3s downloaded + +There are four (4) flags that control which version of **k3s** is installed on your hosts. 
- **install_k3s_commit**: specifies the commit of **k3s** to download from temporary cloud storage.
+The default is to leave this `undefined` as this flag is for developers and QA use.
+
+- **install_k3s_version**: specifies the version of **k3s** to download from GitHub.
+If undefined (the default), ansible will attempt to download from a channel.
+
+- **install_k3s_channel_url**: specifies the URL for the channels.
+The default is [https://update.k3s.io/v1-release/channels](https://update.k3s.io/v1-release/channels).
+It is not something typically changed but is implemented for completeness' sake.
+
+- **install_k3s_channel**: specifies the channel from which to get the version.
+The default is the `stable` channel. A typical channel used is `latest`.
+
+### Flags that change the location of binaries and data
+
+There are three (3) flags that change the default location of files.
+
+- **install_k3s_bin_dir**: specifies the directory to install the **k3s** binary and links.
+The default is `/usr/local/bin`.
+
+- **install_k3s_systemd_dir**: specifies the directory to install **systemd**
+service and environment files. The default is `/etc/systemd/system`.
+
+- **install_k3s_data_dir**: specifies the data directory for the **k3s** service.
+This defaults to `/var/lib/rancher/k3s` and is not (yet) a flag in **k3s-io/k3s**.
+
+### Flags for the k3s executable
+
+The install script from [https://get.k3s.io/](https://get.k3s.io/) has one flag to
+provide extra arguments to the **k3s** executable. **k3s-ansible** uses two flags,
+one for the server and one for the agent(s). These are:
+
+- **install_k3s_server_args**: Default is ''.
+- **install_k3s_agent_args**: Default is ''.
+
+
+## Other flags that were considered from [https://get.k3s.io/](https://get.k3s.io/)
+
+### Flags not yet implemented
+
+The flags that have yet to be implemented are:
+
+- install_k3s_skip_selinux_rpm: If set to true, ansible will skip automatic installation of the **k3s** RPM.
+- install_k3s_selinux_warn: If set to true, ansible will continue if the **k3s-selinux** policy is not found.
+- install_k3s_name: specifies the name of systemd service to create.
+- install_k3s_type: specifies the type of systemd service to create.
+
+### Flags that will not be implemented
+
+Lastly, some flags did not make sense to implement with **k3s-ansible**:
+
+- install_k3s_skip_download: k3s-ansible always downloads the **k3s** binary and its hash.
+- install_k3s_force_restart: k3s-ansible always restarts the service.
+- install_k3s_skip_enable: k3s-ansible always enables the service.
+- install_k3s_skip_start: k3s-ansible always starts the service.
+
diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml
index cab50054c..b6541795b 100644
--- a/inventory/sample/group_vars/all.yml
+++ b/inventory/sample/group_vars/all.yml
@@ -3,122 +3,21 @@
 # This is the user that has SSH password-less access to configure your hosts
 ansible_user: debian
-########################################################
-# Begin: k3s-install.sh flags, from https://get.k3s.io #
-########################################################
+########################################
+# Install flags, like https://get.k3s.io
-# - INSTALL_K3S_SKIP_DOWNLOAD
-# If set to true will not download k3s hash or binary.
-# (WILL NOT BE IMPLEMENTED)
-
-# - INSTALL_K3S_FORCE_RESTART
-# k3s-ansible always restarts the service.
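As a usage note for the `install_k3s_server_args` and `install_k3s_agent_args` flags described in the README above: the k3s options below are plausible examples of what a user might pass through, not defaults of this playbook.

```yaml
# inventory/my-cluster/group_vars/all.yml (illustrative)
install_k3s_server_args: '--disable traefik --write-kubeconfig-mode 644'
install_k3s_agent_args: '--node-label role=worker'
```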
-# (WILL NOT BE IMPLEMENTED) - -# - INSTALL_K3S_SYMLINK -# If set to 'skip' will not create symlinks, 'force' will overwrite, -# default will symlink if command does not exist in path. -# (NOT YET IMPLEMENTED) -# -#install_k3s_symlink: - -# - INSTALL_K3S_SKIP_ENABLE -# k3s-ansble always enables the service. -# (WILL NOT BE IMPLEMENTED) - -# - INSTALL_K3S_SKIP_START -# k3s-ansible always starts the service. -# (WILL NOT BE IMPLEMENTED) - -# - INSTALL_K3S_VERSION -# Version of k3s to download from github. Will attempt to download from the -# stable channel if not specified. -# (IMPLEMENTED) -# +# Flags that control the version of k3s downloaded +#install_k3s_channel_url: 'https://update.k3s.io/v1-release/channels' +#install_k3s_channel: 'latest' #install_k3s_version: v1.22.5+k3s1 - -# - INSTALL_K3S_COMMIT -# Commit of k3s to download from temporary cloud storage. -# * (for developer & QA use) -# (IMPLEMENTED: need the full commit #) -# #install_k3s_commit: -# - INSTALL_K3S_BIN_DIR -# Directory to install k3s binary, links, and uninstall script to, or use -# /usr/local/bin as the default -# (IMPLEMENTED) -# -#install_k3s_bin_dir: '/usr/local/bin' - -# - INSTALL_K3S_BIN_DIR_READ_ONLY -# k3s-ansible expects k3s_bin_dir to be writable. -# (WILL NOT BE IMPLEMENTED) - -# - INSTALL_K3S_SYSTEMD_DIR -# Directory to install systemd service and environment files to, or use -# /etc/systemd/system as the default -# (IMPLEMENTED) -# +# Flags that control the location of things #install_k3s_systemd_dir: '/etc/systemd/system' +#install_k3s_bin_dir: '/usr/local/bin' +#install_k3s_data_dir: '/var/lib/rancher/k3s' -# - INSTALL_K3S_EXEC -# This is replaced by specific variables for servers and agents. -# These will provide extra arguments to the "k3s" application. -# (IMPLEMENTED) -# +# Flags for the k3s executable #install_k3s_server_args: '' #install_k3s_agent_args: '' -# - INSTALL_K3S_NAME -# Name of systemd service to create, will default from the k3s exec command -# if not specified. If specified the name will be prefixed with 'k3s-'. -# (NOT YET IMPLEMENTED) -# -#install_k3s_name: '' - -# - INSTALL_K3S_TYPE -# Type of systemd service to create, will default from the k3s exec command -# if not specified. -# (NOT YET IMPLEMENTED) -# -#install_k3s_type: '' - -# - INSTALL_K3S_SELINUX_WARN -# If set to true will continue if k3s-selinux policy is not found. -# (NOT YET IMPLEMENTED) -# -#install_k3s_selinux_warn: false - -# - INSTALL_K3S_SKIP_SELINUX_RPM -# If set to true will skip automatic installation of the k3s RPM. -# (NOT YET IMPLEMENTED) -# -#install_k3s_skip_selinux_rpm: false - -# - INSTALL_K3S_CHANNEL_URL -# Channel URL for fetching k3s download URL. -# Defaults to 'https://update.k3s.io/v1-release/channels'. -# Must only be https:// URLs -# (IMPLEMENTED) -# -#install_k3s_channel_url: 'https://update.k3s.io/v1-release/channels' - -# - INSTALL_K3S_CHANNEL -# Channel to use for fetching k3s download URL. -# Defaults to 'stable'. -# (NOT YET IMPLEMENTED) -# -#install_k3s_channel: 'latest' - -###################################################### -# End: k3s-install.sh flags, from https://get.k3s.io # -###################################################### - -# - INSTALL_K3S_DATA_DIR -# Change the data directory for the k3s service. Defaults to '/var/lib/rancher/k3s'. -# TODO: submit PR to k3s-io/k3s -# (IMPLEMENTED) -# -#install_k3s_data_dir: '/var/lib/rancher/k3s' - From ae2b55f202b93d204151c22e5e20861d2771010e Mon Sep 17 00:00:00 2001 From: "Jon S. 
Stumpf" Date: Mon, 20 Dec 2021 21:38:50 -0500 Subject: [PATCH 032/108] Moved cluster_config from playbook/group_vars/all.yml to inventory/sample/group_vars/all.yml Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/README.md | 6 ++++-- inventory/sample/group_vars/all.yml | 3 +++ playbook/group_vars/all.yml | 3 --- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md index 329c75892..e01b6daa4 100644 --- a/inventory/sample/group_vars/README.md +++ b/inventory/sample/group_vars/README.md @@ -12,9 +12,11 @@ which is then used throughout **k3s-ansible**. ## General flags -**k3s-ansible** requires SSH password-less access to configure your hosts. -Use **ansible_user** to specify the username with this access. +- **ansible_user**: specifies the username that has SSH password-less access to configure your hosts. +The default is `debian`. +- **cluster_config**: specifies the location of where to capture the kube config of the new cluster. +The default is `playbook/cluster.conf`. ### Flags that control the version of k3s downloaded diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index b6541795b..894a587af 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -3,6 +3,9 @@ # This is the user that has SSH password-less access to configure your hosts ansible_user: debian +# The location of where to capture the kube config of the new cluster +cluster_config: cluster.conf + ######################################## # Install flags, like https://get.k3s.io diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index babe107b2..a27b7adec 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -15,9 +15,6 @@ systemd_dir: "{{ install_k3s_systemd_dir | default('/etc/systemd/system') }}" bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" -# The location of where to capture the new cluster information -cluster_config: cluster.conf - master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" # Services information From f62942bdffd949e2db42f7cf4587e9785393a5c2 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 22:17:39 -0500 Subject: [PATCH 033/108] Created shorter, simpler inventory/sample/group_vars/all.yml Signed-off-by: Jon S. 
Stumpf --- inventory/sample/group_vars/all.yml | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 894a587af..bb1700cb1 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -1,26 +1,17 @@ --- +# See README.md for more options + # This is the user that has SSH password-less access to configure your hosts ansible_user: debian # The location of where to capture the kube config of the new cluster cluster_config: cluster.conf -######################################## -# Install flags, like https://get.k3s.io - -# Flags that control the version of k3s downloaded -#install_k3s_channel_url: 'https://update.k3s.io/v1-release/channels' -#install_k3s_channel: 'latest' -#install_k3s_version: v1.22.5+k3s1 -#install_k3s_commit: - -# Flags that control the location of things -#install_k3s_systemd_dir: '/etc/systemd/system' -#install_k3s_bin_dir: '/usr/local/bin' -#install_k3s_data_dir: '/var/lib/rancher/k3s' +# Use the latest version +install_k3s_channel: 'latest' # Flags for the k3s executable -#install_k3s_server_args: '' -#install_k3s_agent_args: '' +install_k3s_server_args: '' +install_k3s_agent_args: '' From f60e947237f3306a7c48511feba95a5afa928f20 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 22:25:47 -0500 Subject: [PATCH 034/108] Replace shell call to 'curl' with ansible.builtin.uri Signed-off-by: Jon S. Stumpf --- roles/download/tasks/main.yml | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 5412d9d1f..dac20476b 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -31,18 +31,22 @@ vars: version_url: "{{ k3s_channel_url }}/{{ k3s_channel }}" block: - - name: Get version from channel - shell: "curl -w '%{url_effective}' -L -s -S {{ version_url }} -o /dev/null | sed -e 's|.*/||'" - register: curl_output - - name: Output channel information debug: var: version_url when: report_download_urls + - name: Get version from channel + uri: + url: "{{ version_url }}" + follow_redirects: safe + force: true + return_content: false + register: channel_version_info + - name: Set version from channel set_fact: - k3s_version: "{{ curl_output.stdout_lines[0] }}" + k3s_version: "{{ channel_version_info.url.split('/')[-1] }}" when: k3s_version == 'undefined' run_once: true From c16357903889b076320f440f03cb2ef2f5430d95 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 21 Dec 2021 00:09:09 -0500 Subject: [PATCH 035/108] node-token does not need to be executable Signed-off-by: Jon S. Stumpf --- roles/k3s/master/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/master/tasks/main.yml index 363f56cf5..f8ffd5494 100644 --- a/roles/k3s/master/tasks/main.yml +++ b/roles/k3s/master/tasks/main.yml @@ -33,10 +33,10 @@ path: "{{ data_dir }}/server/node-token" register: p -- name: Change file access node-token +- name: Make node-token world-readable file: path: "{{ data_dir }}/server/node-token" - mode: "g+rx,o+rx" + mode: 0644 - name: Read node-token from master slurp: From d43772e6a47d3376ff84e1a0062d29661626ce81 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 21 Dec 2021 09:51:28 -0500 Subject: [PATCH 036/108] Moved roles/download/vars to roles/download/defaults Signed-off-by: Jon S. 
Stumpf --- roles/download/{vars => defaults}/main.yml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename roles/download/{vars => defaults}/main.yml (100%) diff --git a/roles/download/vars/main.yml b/roles/download/defaults/main.yml similarity index 100% rename from roles/download/vars/main.yml rename to roles/download/defaults/main.yml From 3c072a403a295f823e01019f348f703b9970b567 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 15 Dec 2021 20:28:15 -0500 Subject: [PATCH 037/108] Renamed roles k3s/master and k3s/node to k3s/server and k3s/agent, respectively Signed-off-by: Jon S. Stumpf --- playbook/site.yml | 4 ++-- roles/k3s/{node => agent}/tasks/main.yml | 0 roles/k3s/{node => agent}/templates/k3s.service.j2 | 0 roles/k3s/{master => server}/tasks/main.yml | 0 roles/k3s/{master => server}/templates/k3s.service.j2 | 0 roles/k3s/{master => server}/templates/k3s.sh.j2 | 0 6 files changed, 2 insertions(+), 2 deletions(-) rename roles/k3s/{node => agent}/tasks/main.yml (100%) rename roles/k3s/{node => agent}/templates/k3s.service.j2 (100%) rename roles/k3s/{master => server}/tasks/main.yml (100%) rename roles/k3s/{master => server}/templates/k3s.service.j2 (100%) rename roles/k3s/{master => server}/templates/k3s.sh.j2 (100%) diff --git a/playbook/site.yml b/playbook/site.yml index 31cc96ef8..a9f016668 100644 --- a/playbook/site.yml +++ b/playbook/site.yml @@ -11,9 +11,9 @@ - hosts: master become: yes roles: - - role: k3s/master + - role: k3s/server - hosts: node become: yes roles: - - role: k3s/node + - role: k3s/agent diff --git a/roles/k3s/node/tasks/main.yml b/roles/k3s/agent/tasks/main.yml similarity index 100% rename from roles/k3s/node/tasks/main.yml rename to roles/k3s/agent/tasks/main.yml diff --git a/roles/k3s/node/templates/k3s.service.j2 b/roles/k3s/agent/templates/k3s.service.j2 similarity index 100% rename from roles/k3s/node/templates/k3s.service.j2 rename to roles/k3s/agent/templates/k3s.service.j2 diff --git a/roles/k3s/master/tasks/main.yml b/roles/k3s/server/tasks/main.yml similarity index 100% rename from roles/k3s/master/tasks/main.yml rename to roles/k3s/server/tasks/main.yml diff --git a/roles/k3s/master/templates/k3s.service.j2 b/roles/k3s/server/templates/k3s.service.j2 similarity index 100% rename from roles/k3s/master/templates/k3s.service.j2 rename to roles/k3s/server/templates/k3s.service.j2 diff --git a/roles/k3s/master/templates/k3s.sh.j2 b/roles/k3s/server/templates/k3s.sh.j2 similarity index 100% rename from roles/k3s/master/templates/k3s.sh.j2 rename to roles/k3s/server/templates/k3s.sh.j2 From b740fc2085d95796f35a7c8e73b5a2c40539aa1b Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 15 Dec 2021 21:10:28 -0500 Subject: [PATCH 038/108] Renamed host groups master and node to k3s_server and k3s_agent, respectively; Fixed k3s-io/k3s-ansible/#104 Signed-off-by: Jon S. 
Stumpf --- README.md | 10 +++++----- inventory/sample/hosts.ini | 8 ++++---- playbook/group_vars/all.yml | 4 ++-- playbook/site.yml | 4 ++-- roles/k3s/agent/templates/k3s.service.j2 | 2 +- roles/k3s/server/tasks/main.yml | 4 ++-- roles/reset/tasks/main.yml | 2 +- 7 files changed, 17 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index e4cf087fc..6344b3957 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ on processor architecture: ## System requirements Deployment environment must have Ansible 2.4.0+ -Master and nodes must have passwordless SSH access +Server and agents must have passwordless SSH access ## Usage @@ -32,15 +32,15 @@ cp -R inventory/sample inventory/my-cluster Second, edit `inventory/my-cluster/hosts.ini` to match the system information gathered above. For example: ```bash -[master] +[k3s_server] 192.16.35.12 -[node] +[k3s_agent] 192.16.35.[10:11] [k3s_cluster:children] -master -node +k3s_server +k3s_agent ``` If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment. diff --git a/inventory/sample/hosts.ini b/inventory/sample/hosts.ini index b015890d6..8c1671269 100644 --- a/inventory/sample/hosts.ini +++ b/inventory/sample/hosts.ini @@ -1,12 +1,12 @@ -[master] +[k3s_server] 192.168.1.26 -[node] +[k3s_agent] 192.168.1.34 192.168.1.39 192.168.1.16 192.168.1.32 [k3s_cluster:children] -master -node +k3s_server +k3s_agent diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index a27b7adec..d9800135c 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -15,12 +15,12 @@ systemd_dir: "{{ install_k3s_systemd_dir | default('/etc/systemd/system') }}" bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" -master_ip: "{{ hostvars[groups['master'][0]]['ansible_host'] | default(groups['master'][0]) }}" +master_ip: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" # Services information k3s_services: - k3s - - k3s-node + - k3s-agent k3s_service_file_extensions: - service diff --git a/playbook/site.yml b/playbook/site.yml index a9f016668..4f8694b47 100644 --- a/playbook/site.yml +++ b/playbook/site.yml @@ -8,12 +8,12 @@ - role: download - role: raspberrypi -- hosts: master +- hosts: k3s_server become: yes roles: - role: k3s/server -- hosts: node +- hosts: k3s_agent become: yes roles: - role: k3s/agent diff --git a/roles/k3s/agent/templates/k3s.service.j2 b/roles/k3s/agent/templates/k3s.service.j2 index 4c87b1e84..90ac2059f 100644 --- a/roles/k3s/agent/templates/k3s.service.j2 +++ b/roles/k3s/agent/templates/k3s.service.j2 @@ -7,7 +7,7 @@ After=network-online.target Type=notify ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe overlay -ExecStart={{ bin_dir }}/k3s agent --data-dir "{{ data_dir }}" --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['master'][0]]['token'] }} {{ extra_agent_args | default("") }} +ExecStart={{ bin_dir }}/k3s agent --data-dir "{{ data_dir }}" --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['k3s_server'][0]]['token'] }} {{ extra_agent_args | default("") }} KillMode=process Delegate=yes # Having non-zero Limit*s causes performance problems due to accounting overhead diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index f8ffd5494..3e1115b6d 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -38,12 +38,12 @@ path: "{{ 
data_dir }}/server/node-token" mode: 0644 -- name: Read node-token from master +- name: Read node-token from the server slurp: path: "{{ data_dir }}/server/node-token" register: node_token -- name: Store Master node-token +- name: Store the server node-token set_fact: token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}" diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 21d5e3696..298c2735a 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -180,7 +180,7 @@ file: path: "~{{ ansible_user }}/.kube/config" state: absent - when: inventory_hostname in groups['master'] + when: inventory_hostname in groups['k3s_server'] - name: Remove package k3s-selinux yum: From b307aebab36967eefb11bb01b1a0235bb2a9948c Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 15 Dec 2021 21:17:22 -0500 Subject: [PATCH 039/108] Renamed k3s-node service to k3s-agent Signed-off-by: Jon S. Stumpf --- roles/k3s/agent/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/k3s/agent/tasks/main.yml b/roles/k3s/agent/tasks/main.yml index 0ce8e08d0..773457db5 100644 --- a/roles/k3s/agent/tasks/main.yml +++ b/roles/k3s/agent/tasks/main.yml @@ -3,14 +3,14 @@ - name: Copy K3s service file template: src: "k3s.service.j2" - dest: "{{ systemd_dir }}/k3s-node.service" + dest: "{{ systemd_dir }}/k3s-agent.service" owner: root group: root mode: 0755 - name: Enable and check K3s service systemd: - name: k3s-node + name: k3s-agent daemon_reload: yes state: restarted enabled: yes From 13026c8fde0701979d11dd4de8c5acf96648f6c2 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sat, 18 Dec 2021 23:52:38 -0500 Subject: [PATCH 040/108] Renamed master_ip to server_ip Signed-off-by: Jon S. Stumpf --- playbook/group_vars/all.yml | 2 +- roles/k3s/agent/templates/k3s.service.j2 | 2 +- roles/k3s/server/tasks/main.yml | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index d9800135c..3030fa92e 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -15,7 +15,7 @@ systemd_dir: "{{ install_k3s_systemd_dir | default('/etc/systemd/system') }}" bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" -master_ip: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" +server_ip: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" # Services information k3s_services: diff --git a/roles/k3s/agent/templates/k3s.service.j2 b/roles/k3s/agent/templates/k3s.service.j2 index 90ac2059f..58a0f0514 100644 --- a/roles/k3s/agent/templates/k3s.service.j2 +++ b/roles/k3s/agent/templates/k3s.service.j2 @@ -7,7 +7,7 @@ After=network-online.target Type=notify ExecStartPre=-/sbin/modprobe br_netfilter ExecStartPre=-/sbin/modprobe overlay -ExecStart={{ bin_dir }}/k3s agent --data-dir "{{ data_dir }}" --server https://{{ master_ip }}:6443 --token {{ hostvars[groups['k3s_server'][0]]['token'] }} {{ extra_agent_args | default("") }} +ExecStart={{ bin_dir }}/k3s agent --data-dir "{{ data_dir }}" --server https://{{ server_ip }}:6443 --token {{ hostvars[groups['k3s_server'][0]]['token'] }} {{ extra_agent_args | default("") }} KillMode=process Delegate=yes # Having non-zero Limit*s causes performance problems due to accounting overhead diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml 
index 3e1115b6d..bf0d2b78a 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -101,10 +101,10 @@ owner: "{{ ansible_user }}" mode: "u=rw,g=,o=" -- name: Replace https://localhost:6443 by https://master-ip:6443 +- name: Replace https://localhost:6443 by https://server_ip:6443 command: >- {{ bin_dir }}/kubectl config set-cluster default - --server=https://{{ master_ip }}:6443 + --server=https://{{ server_ip }}:6443 --kubeconfig ~{{ ansible_user }}/.kube/config changed_when: true From 2e39548994dc658e0e890b1c4f6826a9f1540624 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 23:27:46 -0500 Subject: [PATCH 041/108] Only reload k3s service when service file(s) have changed Signed-off-by: Jon S. Stumpf --- roles/k3s/agent/tasks/main.yml | 7 ++++--- roles/k3s/server/tasks/main.yml | 4 ++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/roles/k3s/agent/tasks/main.yml b/roles/k3s/agent/tasks/main.yml index 773457db5..2a5715976 100644 --- a/roles/k3s/agent/tasks/main.yml +++ b/roles/k3s/agent/tasks/main.yml @@ -1,16 +1,17 @@ --- - name: Copy K3s service file + register: k3s_agent_service template: src: "k3s.service.j2" dest: "{{ systemd_dir }}/k3s-agent.service" owner: root group: root - mode: 0755 + mode: 0644 - name: Enable and check K3s service systemd: name: k3s-agent - daemon_reload: yes - state: restarted + daemon_reload: "{{ 'yes' if k3s_agent_service.changed else 'no' }}" + state: "{{ 'restarted' if k3s_agent_service.changed else 'started' }}" enabled: yes diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index bf0d2b78a..4bc8a255c 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -16,8 +16,8 @@ - name: Enable and check K3s service systemd: name: k3s - daemon_reload: yes - state: restarted + daemon_reload: "{{ 'yes' if k3s_service.changed else 'no' }}" + state: "{{ 'restarted' if k3s_service.changed else 'started' }}" enabled: yes ################################################################################ From be9855c8af67d2a1da4748daf949a88cc2cb3d59 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 23:28:26 -0500 Subject: [PATCH 042/108] Only forward ipv6 when there are ipv6 interfaces Signed-off-by: Jon S. Stumpf --- roles/prereq/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/prereq/tasks/main.yml b/roles/prereq/tasks/main.yml index 8718db325..690145f47 100644 --- a/roles/prereq/tasks/main.yml +++ b/roles/prereq/tasks/main.yml @@ -17,7 +17,7 @@ value: "1" state: present reload: yes - when: ansible_all_ipv6_addresses + when: ansible_all_ipv6_addresses | length > 0 - name: Add br_netfilter to /etc/modules-load.d/ copy: From 99fa0c12ee1729f419383838ba5d3f37205c93c2 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 20 Dec 2021 23:43:28 -0500 Subject: [PATCH 043/108] Some check_mode tweaks Signed-off-by: Jon S. 
Stumpf --- roles/download/tasks/main.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index dac20476b..013d9fcf5 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -63,12 +63,14 @@ hash_url: "{{ github_url }}/download/{{ k3s_version }}/sha256sum-{{ k3s_arch }}.txt" when: k3s_commit == 'undefined' + check_mode: no - name: Determine Storage URLs set_fact: binary_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ k3s_commit }}" hash_url: "{{ storage_url }}/k3s{{ k3s_suffix }}-{{ k3s_commit }}.sha256sum" when: k3s_commit != 'undefined' + check_mode: no - name: Output URLs block: @@ -80,6 +82,7 @@ debug: var: hash_url when: report_download_urls + check_mode: no - name: Download k3s binary get_url: From dafce83159d60f4b637590393ec646fe9d1d8d5b Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 21 Dec 2021 20:10:15 -0500 Subject: [PATCH 044/108] Reduced differences in OS prereq task files Signed-off-by: Jon S. Stumpf --- roles/raspberrypi/tasks/prereq/CentOS.yml | 2 +- roles/raspberrypi/tasks/prereq/Raspbian.yml | 4 ++-- roles/raspberrypi/tasks/prereq/Ubuntu.yml | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/roles/raspberrypi/tasks/prereq/CentOS.yml b/roles/raspberrypi/tasks/prereq/CentOS.yml index af83564b8..9c83fdec7 100644 --- a/roles/raspberrypi/tasks/prereq/CentOS.yml +++ b/roles/raspberrypi/tasks/prereq/CentOS.yml @@ -1,5 +1,5 @@ --- -- name: Enable cgroup via boot commandline if not already enabled for Centos +- name: Enable cgroup support if not already enabled lineinfile: path: /boot/cmdline.txt backrefs: yes diff --git a/roles/raspberrypi/tasks/prereq/Raspbian.yml b/roles/raspberrypi/tasks/prereq/Raspbian.yml index 42bfe7d1d..dacaf2dd2 100644 --- a/roles/raspberrypi/tasks/prereq/Raspbian.yml +++ b/roles/raspberrypi/tasks/prereq/Raspbian.yml @@ -1,10 +1,10 @@ --- -- name: Activating cgroup support +- name: Enable cgroup support if not already enabled lineinfile: path: /boot/cmdline.txt + backrefs: yes regexp: '^((?!.*\bcgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory\b).*)$' line: '\1 cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory' - backrefs: true notify: reboot - name: Flush iptables before changing to iptables-legacy diff --git a/roles/raspberrypi/tasks/prereq/Ubuntu.yml b/roles/raspberrypi/tasks/prereq/Ubuntu.yml index 742fc2177..87d46c6d5 100644 --- a/roles/raspberrypi/tasks/prereq/Ubuntu.yml +++ b/roles/raspberrypi/tasks/prereq/Ubuntu.yml @@ -1,5 +1,5 @@ --- -- name: Enable cgroup via boot commandline if not already enabled for Ubuntu on a Raspberry Pi +- name: Enable cgroup support if not already enabled lineinfile: path: /boot/firmware/cmdline.txt backrefs: yes From 3a6b0aa32ab1701a0d784f621e0654a1a268c097 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 21 Dec 2021 22:49:57 -0500 Subject: [PATCH 045/108] Mitigated bug with ip-netns Signed-off-by: Jon S. Stumpf --- roles/reset/tasks/main.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 298c2735a..9d8b5c5b1 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -52,17 +52,21 @@ register: ip_netns_show command: ip -j netns show master cni0 +# +# BUG: Possible bug in ip-netns(8) on Raspbian. +# "ip -j netns show master cni0" does not always report "[ ]" but returns 0 when there is no master. 
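The guard added just below works around `ip -j netns show master cni0` sometimes printing nothing at all instead of an empty JSON array. The same protection can also be written with Jinja's `default` filter (its second argument `true` makes the default apply to an empty string as well); a sketch assuming the `ip_netns_show` result registered above:

```yaml
- name: Remove CNI namespaces
  command: ip netns delete {{ item }}
  loop: "{{ ip_netns_show.stdout | default('[]', true) | from_json | map(attribute='name') | list }}"
```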
+# - name: Remove CNI namespaces command: ip netns delete {{ item }} - loop: "{{ ip_netns_show.stdout | from_json | json_query('[*].name') }}" + loop: "{{ (ip_netns_show.stdout if ip_netns_show.stdout != '' else '[ ]') | from_json | json_query('[*].name') }}" # # Remove CNI interfaces # ip link show 2>/dev/null | grep 'master cni0' # -# BUG: Possible bug in ip-link(8). +# BUG: Possible bug in ip-link(8) on Raspbian. # "ip -j link show master cni0" exits 255 when cni0 does not exist where -# "ip -j netns show master cni0" returns "[ ]", which is preferred. +# "ip -j netns show master cni0" reports "[ ]", which is preferred. # - name: Get list of network interface(s) that match 'master cni0' register: ip_link_show From 884972616b402b522ae88f079f4c6d73e8d6c943 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sat, 4 Dec 2021 10:19:36 -0500 Subject: [PATCH 046/108] Added --tls-san with --cluster-init Signed-off-by: Jon S. Stumpf --- roles/k3s/server/defaults/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/k3s/server/defaults/main.yml b/roles/k3s/server/defaults/main.yml index 69db8a19c..0d4feaaaf 100644 --- a/roles/k3s/server/defaults/main.yml +++ b/roles/k3s/server/defaults/main.yml @@ -4,6 +4,7 @@ server_init_args: >- {% if groups['k3s_server'] | length > 1 %} {% if ansible_host == hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) %} --cluster-init + --tls-san {{ apiserver_endpoint }} {% else %} --server https://{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}:6443 {% endif %} From b9487df54acf0e45a0dd60546de182a064ded9a8 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Fri, 24 Dec 2021 21:20:34 -0500 Subject: [PATCH 047/108] Replaced octal modes with symbolic modes Signed-off-by: Jon S. Stumpf --- roles/download/tasks/main.yml | 2 +- roles/k3s/agent/tasks/main.yml | 2 +- roles/k3s/server/tasks/main.yml | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 013d9fcf5..c3273f22d 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -91,5 +91,5 @@ dest: "{{ bin_dir }}/k3s" owner: root group: root - mode: 0755 + mode: "u=rwx,g=rx,o=rx" diff --git a/roles/k3s/agent/tasks/main.yml b/roles/k3s/agent/tasks/main.yml index 2a5715976..ed8bef972 100644 --- a/roles/k3s/agent/tasks/main.yml +++ b/roles/k3s/agent/tasks/main.yml @@ -7,7 +7,7 @@ dest: "{{ systemd_dir }}/k3s-agent.service" owner: root group: root - mode: 0644 + mode: "u=rw,g=r,o=r" - name: Enable and check K3s service systemd: diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index f0923b916..4f8874449 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -51,7 +51,7 @@ dest: "{{ systemd_dir }}/k3s.service" owner: root group: root - mode: 0644 + mode: "u=rw,g=r,o=r" - name: Enable and check K3s service systemd: @@ -76,7 +76,7 @@ - name: Make node-token world-readable file: path: "{{ data_dir }}/server/node-token" - mode: 0644 + mode: "u=rw,g=r,o=r" - name: Read node-token from the server slurp: @@ -103,7 +103,7 @@ dest: "{{ bin_dir }}/k3s.sh" owner: root group: root - mode: 0755 + mode: "u=rwx,g=rx,o=rx" when: - data_dir is defined - data_dir != '/var/lib/rancher/k3s' From e0b8385a72fa8c5723fafcdb415d4916de12ca78 Mon Sep 17 00:00:00 2001 From: "Jon S. 
Stumpf" Date: Fri, 24 Dec 2021 21:22:43 -0500 Subject: [PATCH 048/108] Added {{ bin_dir }} to command line in k3s-init Signed-off-by: Jon S. Stumpf --- roles/k3s/server/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index 4f8874449..fa876a639 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -18,7 +18,7 @@ cmd: "systemd-run -p RestartSec=2 \ -p Restart=on-failure \ --unit=k3s-init \ - k3s server {{ server_init_args }}" + {{ bin_dir }}/k3s server {{ server_init_args }}" creates: "{{ systemd_dir }}/k3s.service" args: warn: false # The ansible systemd module does not support transient units @@ -27,7 +27,7 @@ block: - name: Verify that all nodes actually joined (check k3s-init.service if this fails) command: - cmd: k3s kubectl get nodes -l "node-role.kubernetes.io/master=true" -o=jsonpath="{.items[*].metadata.name}" + cmd: "{{ bin_dir }}/k3s kubectl get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'" register: nodes until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['k3s_server'] | length) retries: 20 From 58a0b4c2f6fff1efeb5b110dc2bc9d54bb1ecc4b Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sat, 25 Dec 2021 09:51:39 -0500 Subject: [PATCH 049/108] Fixed hardcoded path with {{ bin_dir }} Signed-off-by: Jon S. Stumpf --- roles/k3s/server/templates/k3s.sh.j2 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/k3s/server/templates/k3s.sh.j2 b/roles/k3s/server/templates/k3s.sh.j2 index 6d46a00f8..decdc719d 100755 --- a/roles/k3s/server/templates/k3s.sh.j2 +++ b/roles/k3s/server/templates/k3s.sh.j2 @@ -3,7 +3,7 @@ # k3s.sh is used to supply the --data-dir argument to k3s for symlink'd commands. # Note: this file is only present when data_dir is defined. -K3S_BIN="/usr/local/bin/k3s" +K3S_BIN="{{ bin_dir }}/k3s" DATA_DIR="{{ data_dir }}" BASENAME="${0##*/}" From 86c6caf422f080b0041f5a8e9b756f19ed755a75 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Fri, 24 Dec 2021 21:37:25 -0500 Subject: [PATCH 050/108] Added new variable, first_server Signed-off-by: Jon S. 
Stumpf --- playbook/group_vars/all.yml | 3 ++- roles/k3s/server/defaults/main.yml | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index a8c47f2a3..deb9c0f75 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -15,7 +15,8 @@ systemd_dir: "{{ install_k3s_systemd_dir | default('/etc/systemd/system') }}" bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" -apiserver_endpoint: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" +first_server: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" +apiserver_endpoint: "{{ first_server }}" # k3s_token: "MySuperSecureToken" diff --git a/roles/k3s/server/defaults/main.yml b/roles/k3s/server/defaults/main.yml index 0d4feaaaf..5cf1b67e5 100644 --- a/roles/k3s/server/defaults/main.yml +++ b/roles/k3s/server/defaults/main.yml @@ -2,11 +2,11 @@ ansible_user: root server_init_args: >- {% if groups['k3s_server'] | length > 1 %} - {% if ansible_host == hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) %} + {% if ansible_host == first_server %} --cluster-init --tls-san {{ apiserver_endpoint }} {% else %} - --server https://{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}:6443 + --server https://{{ first_server }}:6443 {% endif %} --token {{ k3s_token }} {% endif %} From 7523ce80954339cada9d1eefc22eeba201e41911 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 14:47:02 -0500 Subject: [PATCH 051/108] Fixed bug where verification would run once per server in k3s_servers, 20 times Signed-off-by: Jon S. Stumpf --- roles/k3s/server/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index fa876a639..e4eb4607d 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -28,6 +28,7 @@ - name: Verify that all nodes actually joined (check k3s-init.service if this fails) command: cmd: "{{ bin_dir }}/k3s kubectl get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'" + run_once: true register: nodes until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['k3s_server'] | length) retries: 20 From 284cf4f06070b355bddc98409641c4bc2f43aa67 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 14:52:08 -0500 Subject: [PATCH 052/108] Fixed verification to not run in check_mode Signed-off-by: Jon S. Stumpf --- roles/k3s/server/tasks/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index e4eb4607d..3d0dcf3be 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -28,6 +28,7 @@ - name: Verify that all nodes actually joined (check k3s-init.service if this fails) command: cmd: "{{ bin_dir }}/k3s kubectl get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'" + when: not ansible_check_mode run_once: true register: nodes until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['k3s_server'] | length) From 63318f25323f25c1cf88dff48cd357808089cf4a Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 15:08:39 -0500 Subject: [PATCH 053/108] k3s_token must be defined Signed-off-by: Jon S. 
Stumpf --- playbook/group_vars/all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index deb9c0f75..e29f11604 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -18,7 +18,7 @@ data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" first_server: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" apiserver_endpoint: "{{ first_server }}" -# k3s_token: "MySuperSecureToken" +k3s_token: "MySuperSecureToken" # Services information k3s_services: From 6a9a02b0c911c1400814fcacd2b6a511d96e5d23 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 17:43:27 -0500 Subject: [PATCH 054/108] Added roles/config-check Signed-off-by: Jon S. Stumpf --- playbook/site.yml | 2 + roles/config-check/defaults/main.yml | 9 +++++ roles/config-check/tasks/main.yml | 57 ++++++++++++++++++++++++++++ roles/download/tasks/main.yml | 43 +++------------------ 4 files changed, 74 insertions(+), 37 deletions(-) create mode 100644 roles/config-check/defaults/main.yml create mode 100644 roles/config-check/tasks/main.yml diff --git a/playbook/site.yml b/playbook/site.yml index 4f8694b47..d2dc9b011 100644 --- a/playbook/site.yml +++ b/playbook/site.yml @@ -4,6 +4,8 @@ gather_facts: yes become: yes roles: + - role: config-check + run_once: true - role: prereq - role: download - role: raspberrypi diff --git a/roles/config-check/defaults/main.yml b/roles/config-check/defaults/main.yml new file mode 100644 index 000000000..a601f9ff8 --- /dev/null +++ b/roles/config-check/defaults/main.yml @@ -0,0 +1,9 @@ +--- + +k3s_channel_url: 'https://update.k3s.io/v1-release/channels' +k3s_channel: 'stable' +k3s_version: 'undefined' +k3s_commit: 'undefined' + +report_version_info: false + diff --git a/roles/config-check/tasks/main.yml b/roles/config-check/tasks/main.yml new file mode 100644 index 000000000..6236b461a --- /dev/null +++ b/roles/config-check/tasks/main.yml @@ -0,0 +1,57 @@ +--- +- name: Check for correct server configuration (1 = non-HA; 3+ = HA) + fail: + msg: "HA configuration requires a minimum of three (3) servers." + when: groups['k3s_server'] | length == 2 + +- name: Determine version to download + block: + - name: Determine version from channel + vars: + version_url: "{{ k3s_channel_url }}/{{ k3s_channel }}" + block: + - name: Output channel information + debug: + var: version_url + when: report_version_info + + - name: Get version from channel + uri: + url: "{{ version_url }}" + follow_redirects: safe + force: yes + return_content: no + register: channel_version_info + + - name: Set version from channel + set_fact: + k3s_version: "{{ channel_version_info.url.split('/')[-1] }}" + + when: k3s_version == 'undefined' + + - name: Output version + debug: + var: k3s_version + when: report_version_info + + when: k3s_commit == 'undefined' + check_mode: no + +- name: Check for minimum k3s version for HA configuration + fail: + msg: "HA configurations require k3s v1.19.5+k3s1 or greater. {{ k3s_version }} was specified." + vars: + short_version: "{{ k3s_version | regex_search('v([^+]+)+', '\\1') | first }}" + when: + - k3s_commit == 'undefined' + - groups['k3s_server'] | length >= 3 + - short_version is version('1.19.5', 'lt', True) + +- name: No check when k3s_commit is specified + debug: + msg: "There is no version check for HA support when a k3s_commit is specified. We assume you know what you are doing." 
+ when: + - k3s_commit != 'undefined' + - groups['k3s_server'] | length >= 3 + - report_version_info + diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index c3273f22d..699817709 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -25,43 +25,11 @@ - ansible_facts.architecture is search("arm") - ansible_facts.userspace_bits == "32" -- name: Determine version to download - block: - - name: Determine version from channel - vars: - version_url: "{{ k3s_channel_url }}/{{ k3s_channel }}" - block: - - name: Output channel information - debug: - var: version_url - when: report_download_urls - - - name: Get version from channel - uri: - url: "{{ version_url }}" - follow_redirects: safe - force: true - return_content: false - register: channel_version_info - - - name: Set version from channel - set_fact: - k3s_version: "{{ channel_version_info.url.split('/')[-1] }}" - - when: k3s_version == 'undefined' - run_once: true - - - name: Output version - debug: - var: k3s_version - when: report_download_urls - run_once: true - - - name: Determine GitHub URLs - set_fact: - binary_url: "{{ github_url }}/download/{{ k3s_version }}/k3s{{ k3s_suffix }}" - hash_url: "{{ github_url }}/download/{{ k3s_version }}/sha256sum-{{ k3s_arch }}.txt" - +# Determine URLs for download +- name: Determine Github URLs + set_fact: + binary_url: "{{ github_url }}/download/{{ k3s_version }}/k3s{{ k3s_suffix }}" + hash_url: "{{ github_url }}/download/{{ k3s_version }}/sha256sum-{{ k3s_arch }}.txt" when: k3s_commit == 'undefined' check_mode: no @@ -84,6 +52,7 @@ when: report_download_urls check_mode: no +# Download binaries - name: Download k3s binary get_url: url: "{{ binary_url }}" From 5db083004e5aa33680637c3e63b1820dc9d386e5 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 18:38:43 -0500 Subject: [PATCH 055/108] Added ha_enabled flag to specifically ask for an HA embedded database using etcd Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/README.md | 3 +++ inventory/sample/group_vars/all.yml | 3 +++ roles/config-check/defaults/main.yml | 2 ++ roles/config-check/tasks/main.yml | 27 ++++++++++++++++++++++----- roles/k3s/server/defaults/main.yml | 2 +- 5 files changed, 31 insertions(+), 6 deletions(-) diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md index e01b6daa4..7bdcdc99f 100644 --- a/inventory/sample/group_vars/README.md +++ b/inventory/sample/group_vars/README.md @@ -18,6 +18,9 @@ The default is `debian`. - **cluster_config**: specifies the location of where to capture the kube config of the new cluster. The default is `playbook/cluster.conf`. +- **ha_enabled**: specifies if the cluster will have an HA embedded database using **etcd**. +The default is `false`. + ### Flags that control the version of k3s downloaded There are four (4) flags that control which version of **k3s** is installed on your hosts. 
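The HA version gate introduced above can be exercised on its own. A minimal sketch — not part of the patch series, and the sample `k3s_version` value is only an assumption — showing how the `regex_search`/`version` pair strips the `+k3sN` build suffix before comparing against the v1.19.5 minimum:

```yaml
# Standalone sketch: evaluate the same expressions config-check uses.
# The k3s_version value here is only an example.
- hosts: localhost
  gather_facts: no
  vars:
    k3s_version: "v1.19.4+k3s2"   # example; anything older than v1.19.5 should fail the HA gate
    short_version: "{{ k3s_version | regex_search('v([^+]+)+', '\\1') | first }}"
  tasks:
    - name: Show whether this version would be rejected for HA
      debug:
        msg: "{{ short_version }} too old for HA: {{ short_version is version('1.19.5', 'lt', True) }}"
```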
diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index bb1700cb1..487b0455d 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -8,6 +8,9 @@ ansible_user: debian # The location of where to capture the kube config of the new cluster cluster_config: cluster.conf +# Set up this cluster with an HA embedded database using etcd +ha_enabled: false + # Use the latest version install_k3s_channel: 'latest' diff --git a/roles/config-check/defaults/main.yml b/roles/config-check/defaults/main.yml index a601f9ff8..3db162049 100644 --- a/roles/config-check/defaults/main.yml +++ b/roles/config-check/defaults/main.yml @@ -1,5 +1,7 @@ --- +ha_enabled: false + k3s_channel_url: 'https://update.k3s.io/v1-release/channels' k3s_channel: 'stable' k3s_version: 'undefined' diff --git a/roles/config-check/tasks/main.yml b/roles/config-check/tasks/main.yml index 6236b461a..5114abc94 100644 --- a/roles/config-check/tasks/main.yml +++ b/roles/config-check/tasks/main.yml @@ -1,8 +1,25 @@ --- -- name: Check for correct server configuration (1 = non-HA; 3+ = HA) +- name: Check for correct single server configuration fail: - msg: "HA configuration requires a minimum of three (3) servers." - when: groups['k3s_server'] | length == 2 + msg: "Single server configuration requires exactly one (1) server." + when: + - not ha_enabled + - groups['k3s_server'] | length != 1 + +# Purposefully do the HA check in two steps +- name: Check for correct HA configuration (minimum) + fail: + msg: "HA configuration requires an odd number of servers > 1 (i.e., minimum is three)." + when: + - ha_enabled + - groups['k3s_server'] | length < 3 + +- name: Check for correct HA configuration (odd number) + fail: + msg: "HA configuration requires an ODD number of servers." + when: + - ha_enabled + - groups['k3s_server'] | length is divisibleby 2 - name: Determine version to download block: @@ -44,7 +61,7 @@ short_version: "{{ k3s_version | regex_search('v([^+]+)+', '\\1') | first }}" when: - k3s_commit == 'undefined' - - groups['k3s_server'] | length >= 3 + - ha_enabled - short_version is version('1.19.5', 'lt', True) - name: No check when k3s_commit is specified @@ -52,6 +69,6 @@ msg: "There is no version check for HA support when a k3s_commit is specified. We assume you know what you are doing." when: - k3s_commit != 'undefined' - - groups['k3s_server'] | length >= 3 + - ha_enabled - report_version_info diff --git a/roles/k3s/server/defaults/main.yml b/roles/k3s/server/defaults/main.yml index 5cf1b67e5..b97dc56d7 100644 --- a/roles/k3s/server/defaults/main.yml +++ b/roles/k3s/server/defaults/main.yml @@ -1,7 +1,7 @@ --- ansible_user: root server_init_args: >- - {% if groups['k3s_server'] | length > 1 %} + {% if ha_enabled %} {% if ansible_host == first_server %} --cluster-init --tls-san {{ apiserver_endpoint }} From 4a27b19e960a371423450435b128870cfb42d4d1 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 19:48:49 -0500 Subject: [PATCH 056/108] Moved k3s-init tasks to a separate file Signed-off-by: Jon S. 
Stumpf --- roles/k3s/server/tasks/k3s-init.yml | 44 ++++++++++++++++++++++++++ roles/k3s/server/tasks/main.yml | 48 +++++------------------------ 2 files changed, 51 insertions(+), 41 deletions(-) create mode 100644 roles/k3s/server/tasks/k3s-init.yml diff --git a/roles/k3s/server/tasks/k3s-init.yml b/roles/k3s/server/tasks/k3s-init.yml new file mode 100644 index 000000000..8582fabe8 --- /dev/null +++ b/roles/k3s/server/tasks/k3s-init.yml @@ -0,0 +1,44 @@ +--- + +- name: Clean previous runs of k3s-init + systemd: + name: k3s-init + state: stopped + failed_when: false + +- name: Clean previous runs of k3s-init + command: systemctl reset-failed k3s-init + failed_when: false + changed_when: false + args: + warn: false # The ansible systemd module does not support reset-failed + +- name: Init cluster inside the transient k3s-init service + command: + cmd: "systemd-run -p RestartSec=2 \ + -p Restart=on-failure \ + --unit=k3s-init \ + {{ bin_dir }}/k3s server {{ server_init_args }}" + creates: "{{ systemd_dir }}/k3s.service" + args: + warn: false # The ansible systemd module does not support transient units + +- name: Verification + block: + - name: Verify that all nodes actually joined (check k3s-init.service if this fails) + command: + cmd: "{{ bin_dir }}/k3s kubectl get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'" + when: not ansible_check_mode + run_once: true + register: nodes + until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['k3s_server'] | length) + retries: 20 + delay: 10 + changed_when: false + always: + - name: Kill the temporary service used for initialization + systemd: + name: k3s-init + state: stopped + failed_when: false + diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index 3d0dcf3be..49b27cd72 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -1,46 +1,12 @@ --- -- name: Clean previous runs of k3s-init - systemd: - name: k3s-init - state: stopped - failed_when: false - -- name: Clean previous runs of k3s-init - command: systemctl reset-failed k3s-init - failed_when: false - changed_when: false - args: - warn: false # The ansible systemd module does not support reset-failed - -- name: Init cluster inside the transient k3s-init service - command: - cmd: "systemd-run -p RestartSec=2 \ - -p Restart=on-failure \ - --unit=k3s-init \ - {{ bin_dir }}/k3s server {{ server_init_args }}" - creates: "{{ systemd_dir }}/k3s.service" - args: - warn: false # The ansible systemd module does not support transient units - -- name: Verification - block: - - name: Verify that all nodes actually joined (check k3s-init.service if this fails) - command: - cmd: "{{ bin_dir }}/k3s kubectl get nodes -l 'node-role.kubernetes.io/master=true' -o=jsonpath='{.items[*].metadata.name}'" - when: not ansible_check_mode - run_once: true - register: nodes - until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['k3s_server'] | length) - retries: 20 - delay: 10 - changed_when: false - always: - - name: Kill the temporary service used for initialization - systemd: - name: k3s-init - state: stopped - failed_when: false +################################################################################ +# Setup servers in cluster using k3s-init +# + +- name: Initialize k3s cluster + include_tasks: k3s-init.yml + when: ha_enabled ################################################################################ # Setup k3s service From 7b43a5719fc233f9157845a8f09b7fb5050dafbd Mon Sep 
17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 19:23:33 -0500 Subject: [PATCH 057/108] Added cluster VIP method: externally provided cluster VIP Signed-off-by: Jon S. Stumpf --- README.md | 21 +++++++++++++-------- inventory/sample/group_vars/README.md | 6 ++++++ inventory/sample/group_vars/all.yml | 2 ++ playbook/group_vars/all.yml | 3 ++- 4 files changed, 23 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index d022cab52..391efc713 100644 --- a/README.md +++ b/README.md @@ -18,8 +18,8 @@ on processor architecture: ## System requirements -Deployment environment must have Ansible 2.4.0+ -Server and agents must have passwordless SSH access +Deployment environment must have Ansible 2.4.0+. +Server and agents must have passwordless SSH access. ## Usage @@ -43,11 +43,7 @@ k3s_server k3s_agent ``` -If multiple hosts are in the server group, the playbook will automatically setup k3s in HA mode with etcd. -https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/ -This requires at least k3s version 1.19.1 - -If needed, you can also edit `inventory/my-cluster/group_vars/all.yml` to match your environment. +Third, edit `inventory/my-cluster/group_vars/all.yml` to best match your environment. Start provisioning of the cluster using the following command: @@ -57,8 +53,17 @@ ansible-playbook playbook/site.yml -i inventory/my-cluster/hosts.ini ## Kubeconfig -To get access to your new **Kubernetes** cluster, just use the generated kube config. +To get access to your new **Kubernetes** cluster, just use the generated kube configuration file. ```bash kubectl --kubeconfig playbook/cluster.conf ... ``` + +## High Availability +If you enable high availability (`ha_enabled`), the playbook will setup an embedded database using **etcd**. +High availability requires at least k3s version **v1.19.5+k3s1** and an odd number of servers (minimum of three). +See [https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/). + +HA expects that there is a cluster virtual IP (`ha_cluster_vip`) in front of the control-plane servers. +Currently, the only supported method is to use a virtual IP, external to the cluster. + diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md index 7bdcdc99f..8301ce3f4 100644 --- a/inventory/sample/group_vars/README.md +++ b/inventory/sample/group_vars/README.md @@ -21,6 +21,12 @@ The default is `playbook/cluster.conf`. - **ha_enabled**: specifies if the cluster will have an HA embedded database using **etcd**. The default is `false`. +- **ha_cluster_vip**: specifies the virtual IP address in front of the control-plane servers for +agent configuration as well as cluster definition in .kube/config. +Note: This is an IP address different than those of the cluster nodes. +Today, this is a static IP address provided in this file. +It is possible to get an IP address dynamically but that is not implemented here. + ### Flags that control the version of k3s downloaded There are four (4) flags that control which version of **k3s** is installed on your hosts. 
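Taken together, the init pieces in the preceding patches decide what each control-plane node hands to `k3s server`: the transient `k3s-init` unit on the first server gets `--cluster-init --tls-san {{ apiserver_endpoint }}`, the remaining servers get `--server https://{{ first_server }}:6443`, and all of them append the token. A small throwaway play — a sketch only, assuming it is run from the `playbook/` directory with the same inventory so `first_server`, `apiserver_endpoint` and `k3s_token` resolve, and assuming the role defaults can be loaded by relative path — can preview that rendering per host:

```yaml
# Sketch (not part of the roles): preview the arguments each server would
# hand to `k3s server` during the transient k3s-init phase.
- hosts: k3s_server
  gather_facts: no
  tasks:
    - name: Load the server role defaults so server_init_args is defined
      include_vars: ../roles/k3s/server/defaults/main.yml   # path assumed relative to playbook/

    - name: Show the rendered init arguments for this host
      debug:
        msg: "{{ bin_dir | default('/usr/local/bin') }}/k3s server {{ server_init_args }}"
```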
diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index 487b0455d..a9e5d12b1 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -11,6 +11,8 @@ cluster_config: cluster.conf # Set up this cluster with an HA embedded database using etcd ha_enabled: false +ha_cluster_vip: 192.168.1.254 + # Use the latest version install_k3s_channel: 'latest' diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index e29f11604..91c53495d 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -16,7 +16,8 @@ bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" first_server: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" -apiserver_endpoint: "{{ first_server }}" +apiserver_endpoint: "{{ ha_cluster_vip if ha_enabled else first_server }}" + k3s_token: "MySuperSecureToken" From daf2dd201d29f9a08cc0b7df1b6ac8e1fb174a85 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 20:11:43 -0500 Subject: [PATCH 058/108] Cherry-Picked: Fixed download task names Signed-off-by: Jon S. Stumpf --- roles/download/tasks/main.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 699817709..766f1c2ae 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -40,13 +40,13 @@ when: k3s_commit != 'undefined' check_mode: no -- name: Output URLs +- name: Report Download URLs block: - - name: Binary URL + - name: Report URL for binary debug: var: binary_url - - name: Hash URL + - name: Report URL for hash debug: var: hash_url when: report_download_urls From 72aefe31f0a0f16d7f2d95a66609527e775f509c Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 21:20:56 -0500 Subject: [PATCH 059/108] Cherry-Picked: Added option to keep the downloaded binaries after reset Signed-off-by: Jon S. Stumpf --- roles/reset/defaults/main.yml | 5 +++++ roles/reset/tasks/main.yml | 11 +++++++++-- 2 files changed, 14 insertions(+), 2 deletions(-) create mode 100644 roles/reset/defaults/main.yml diff --git a/roles/reset/defaults/main.yml b/roles/reset/defaults/main.yml new file mode 100644 index 000000000..82b7ba800 --- /dev/null +++ b/roles/reset/defaults/main.yml @@ -0,0 +1,5 @@ +--- + +# Changing this flag will keep the downloaded binaries after a reset +keep_binaries: false + diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 9d8b5c5b1..9befced69 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -165,7 +165,7 @@ label: "{{ item.item }}" # Remove files and directories -- name: Remove files, binaries and data +- name: Remove files and data file: path: "{{ item }}" state: absent @@ -175,11 +175,18 @@ - /run/flannel - "{{ data_dir }}" - /var/lib/kubelet - - "{{ bin_dir }}/k3s" - "{{ bin_dir }}/k3s.sh" - "{{ bin_dir }}/k3s-killall.sh" - "{{ bin_dir }}/k3s-uninstall.sh" +- name: Remove downloaded binaries + file: + path: "{{ item }}" + state: absent + when: not keep_binaries + loop: + - "{{ bin_dir }}/k3s" + - name: Remove ~{{ ansible_user }}/.kube/config file: path: "~{{ ansible_user }}/.kube/config" From b24490a991098b63d5947b12ab78df9078aed79e Mon Sep 17 00:00:00 2001 From: "Jon S. 
Stumpf" Date: Sun, 26 Dec 2021 21:00:19 -0500 Subject: [PATCH 060/108] Ensure that all files under inventory/sample are stored in git Signed-off-by: Jon S. Stumpf --- inventory/.gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/inventory/.gitignore b/inventory/.gitignore index 5ae5024ee..9a1ab6b19 100644 --- a/inventory/.gitignore +++ b/inventory/.gitignore @@ -1,3 +1,4 @@ * !.gitignore !sample/ +!sample/** From 808bb14aba5474d8589c93cfc161ae9a8a236131 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 26 Dec 2021 21:41:14 -0500 Subject: [PATCH 061/108] Added cluster VIP method: kube-vip Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/README.md | 11 ++- inventory/sample/group_vars/all.yml | 1 + .../sample/group_vars/k3s_server/kube-vip.yml | 26 +++++++ .../server/tasks/cluster-method/external.yml | 2 + .../server/tasks/cluster-method/kube-vip.yml | 26 +++++++ roles/k3s/server/tasks/main.yml | 4 + .../kube-vip/kube-vip-arp-ds.yaml.j2 | 78 +++++++++++++++++++ .../kube-vip/kube-vip-rbac.yaml.j2 | 35 +++++++++ roles/reset/tasks/cluster-method/external.yml | 3 + roles/reset/tasks/cluster-method/kube-vip.yml | 10 +++ roles/reset/tasks/main.yml | 8 ++ 11 files changed, 203 insertions(+), 1 deletion(-) create mode 100644 inventory/sample/group_vars/k3s_server/kube-vip.yml create mode 100644 roles/k3s/server/tasks/cluster-method/external.yml create mode 100644 roles/k3s/server/tasks/cluster-method/kube-vip.yml create mode 100644 roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-arp-ds.yaml.j2 create mode 100644 roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-rbac.yaml.j2 create mode 100644 roles/reset/tasks/cluster-method/external.yml create mode 100644 roles/reset/tasks/cluster-method/kube-vip.yml diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md index 8301ce3f4..30a2fa25f 100644 --- a/inventory/sample/group_vars/README.md +++ b/inventory/sample/group_vars/README.md @@ -18,15 +18,24 @@ The default is `debian`. - **cluster_config**: specifies the location of where to capture the kube config of the new cluster. The default is `playbook/cluster.conf`. +## High-Availability (HA) Flags + - **ha_enabled**: specifies if the cluster will have an HA embedded database using **etcd**. The default is `false`. -- **ha_cluster_vip**: specifies the virtual IP address in front of the control-plane servers for +- **ha_cluster_vip**: specifies the virtual IP (VIP) address in front of the control-plane servers for agent configuration as well as cluster definition in .kube/config. Note: This is an IP address different than those of the cluster nodes. Today, this is a static IP address provided in this file. It is possible to get an IP address dynamically but that is not implemented here. +- **ha_cluster_method**: specifies the method of clustering to use for the virtual IP. +The methods implemented today are: +1. **external** - requires a load-balancer external to the cluster +2. **kube-vip** - [https://kube-vip.io](https://kube-vip.io), arp-based daemonset using leader election + +Other load-balancing options are available (e.g., **keepalived**) but are not implemented here (yet). + ### Flags that control the version of k3s downloaded There are four (4) flags that control which version of **k3s** is installed on your hosts. 
diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index a9e5d12b1..ef8a8ea28 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -12,6 +12,7 @@ cluster_config: cluster.conf ha_enabled: false ha_cluster_vip: 192.168.1.254 +ha_cluster_method: external # Use the latest version install_k3s_channel: 'latest' diff --git a/inventory/sample/group_vars/k3s_server/kube-vip.yml b/inventory/sample/group_vars/k3s_server/kube-vip.yml new file mode 100644 index 000000000..31cc2eb43 --- /dev/null +++ b/inventory/sample/group_vars/k3s_server/kube-vip.yml @@ -0,0 +1,26 @@ +--- + +# This file is used to configure the kube-vip manifest files. It typically does +# not need to be changed but you can at your discretion to suit your environment. +# These variables are used only when HA is enabled and the method chosen is kube-vip. + +# These are commonly changed values for kube-vip. +kube_vip_version: 'v0.4.0' +kube_vip_interface: 'eth0' + +# These are less commonly changed values for kube-vip. +kube_vip_cidr: '32' +kube_vip_leaseduration: '5' +kube_vip_renewdeadline: '3' +kube_vip_retryperiod: '1' + +kube_vip_namespace: 'kube-system' +kube_vip_daemonset_name: 'kube-vip-ds' +kube_vip_container_name: 'kube-vip' +kube_vip_serviceaccount_name: 'kube-vip' +kube_vip_clusterrole_name: 'kube-vip-role' +kube_vip_clusterrolebinding_name: 'kube-vip-binding' + +# More options can be found in ... +# roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-arp-ds.yaml.j2 + diff --git a/roles/k3s/server/tasks/cluster-method/external.yml b/roles/k3s/server/tasks/cluster-method/external.yml new file mode 100644 index 000000000..55d3cbcc7 --- /dev/null +++ b/roles/k3s/server/tasks/cluster-method/external.yml @@ -0,0 +1,2 @@ +--- +# Nothing (at the moment) to do for an external load-balancer. 
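Putting the new knobs together: HA is switched on in the inventory's `group_vars/all.yml`, while the kube-vip specifics (interface, version, lease timings) stay in `group_vars/k3s_server/kube-vip.yml` as shown above. A hypothetical `inventory/my-cluster/group_vars/all.yml` for a three-server cluster using the kube-vip method might read as follows; the address and user are placeholders, not recommendations:

```yaml
# Hypothetical inventory/my-cluster/group_vars/all.yml for an HA cluster
# fronted by kube-vip; the VIP must be an unused address on the servers' subnet.
ansible_user: debian
ha_enabled: true
ha_cluster_vip: 192.168.1.254
ha_cluster_method: kube-vip
install_k3s_channel: 'latest'
```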
diff --git a/roles/k3s/server/tasks/cluster-method/kube-vip.yml b/roles/k3s/server/tasks/cluster-method/kube-vip.yml new file mode 100644 index 000000000..9734f8ee7 --- /dev/null +++ b/roles/k3s/server/tasks/cluster-method/kube-vip.yml @@ -0,0 +1,26 @@ +--- +- name: Create manifest directory {{ ha_cluster_method }} + file: + path: "{{ data_dir }}/server/manifests/{{ ha_cluster_method }}" + state: directory + owner: root + group: root + mode: "u=rwx,g=,o=" + register: manifest_dir + +- name: Copy RBAC manifest + template: + src: "cluster-method/{{ ha_cluster_method }}/kube-vip-rbac.yaml.j2" + dest: "{{ manifest_dir.path }}/kube-vip-rbac.yaml" + owner: root + group: root + mode: "u=rw,g=,o=" + +- name: Copy arp daemonset manifest + template: + src: "cluster-method/{{ ha_cluster_method }}/kube-vip-arp-ds.yaml.j2" + dest: "{{ manifest_dir.path }}/kube-vip-arp-ds.yaml" + owner: root + group: root + mode: "u=rw,g=,o=" + diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index 49b27cd72..8a92fdd07 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -8,6 +8,10 @@ include_tasks: k3s-init.yml when: ha_enabled +- name: Perform HA-specific tasks + include_tasks: "cluster-method/{{ ha_cluster_method }}.yml" + when: ha_enabled + ################################################################################ # Setup k3s service # diff --git a/roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-arp-ds.yaml.j2 b/roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-arp-ds.yaml.j2 new file mode 100644 index 000000000..ad283d251 --- /dev/null +++ b/roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-arp-ds.yaml.j2 @@ -0,0 +1,78 @@ +# +# This configuration implements an arp-based failover in a daemonset using leader election. 
+# Original: https://kube-vip.io/manifests/v0.4.0/kube-vip-arp-ds.yaml +# +apiVersion: apps/v1 +kind: DaemonSet +metadata: + creationTimestamp: null + name: {{ kube_vip_daemonset_name }} + namespace: {{ kube_vip_namespace }} +spec: + selector: + matchLabels: + name: {{ kube_vip_daemonset_name }} + template: + metadata: + creationTimestamp: null + labels: + name: {{ kube_vip_daemonset_name }} + spec: + affinity: + nodeAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: node-role.kubernetes.io/master + operator: Exists + - matchExpressions: + - key: node-role.kubernetes.io/control-plane + operator: Exists + containers: + - args: + - manager + env: + - name: vip_arp + value: "true" + - name: port + value: "6443" + - name: vip_interface + value: "{{ kube_vip_interface }}" + - name: vip_cidr + value: "{{ kube_vip_cidr }}" + - name: cp_enable + value: "true" + - name: cp_namespace + value: "{{ kube_vip_namespace }}" + - name: vip_ddns + value: "false" + - name: svc_enable + value: "true" + - name: vip_leaderelection + value: "true" + - name: vip_leaseduration + value: "{{ kube_vip_leaseduration }}" + - name: vip_renewdeadline + value: "{{ kube_vip_renewdeadline }}" + - name: vip_retryperiod + value: "{{ kube_vip_retryperiod }}" + - name: vip_address + value: "{{ ha_cluster_vip }}" + image: ghcr.io/kube-vip/kube-vip:{{ kube_vip_version }} + imagePullPolicy: Always + name: {{ kube_vip_container_name }} + resources: {} + securityContext: + capabilities: + add: + - NET_ADMIN + - NET_RAW + - SYS_TIME + hostNetwork: true + serviceAccountName: {{ kube_vip_serviceaccount_name }} + tolerations: + - effect: NoSchedule + operator: Exists + - effect: NoExecute + operator: Exists + updateStrategy: {} diff --git a/roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-rbac.yaml.j2 b/roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-rbac.yaml.j2 new file mode 100644 index 000000000..6378e5175 --- /dev/null +++ b/roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-rbac.yaml.j2 @@ -0,0 +1,35 @@ +# +# From https://kube-vip.io/manifests/rbac.yaml +# +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ kube_vip_serviceaccount_name }} + namespace: {{ kube_vip_namespace }} +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + annotations: + rbac.authorization.kubernetes.io/autoupdate: "true" + name: system:{{ kube_vip_clusterrole_name }} +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "create", "update", "list", "put"] + - apiGroups: [""] + resources: ["services"] + verbs: ["list","get","watch", "update"] +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: system:{{ kube_vip_clusterrolebinding_name }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: system:{{ kube_vip_clusterrole_name }} +subjects: +- kind: ServiceAccount + name: {{ kube_vip_serviceaccount_name }} + namespace: {{ kube_vip_namespace }} diff --git a/roles/reset/tasks/cluster-method/external.yml b/roles/reset/tasks/cluster-method/external.yml new file mode 100644 index 000000000..69f228c0f --- /dev/null +++ b/roles/reset/tasks/cluster-method/external.yml @@ -0,0 +1,3 @@ +--- +# Nothing (at the moment) to do for an external load-balancer. 
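Once these manifests are templated onto the servers and the cluster is up, a short verification play — offered only as a sketch, with the daemonset name and namespace taken from the defaults above and the usual `bin_dir` / `ha_cluster_vip` inventory variables assumed — can confirm the daemonset is running and that the VIP answers on the apiserver port:

```yaml
# Sketch: confirm kube-vip is up and the VIP is serving the apiserver.
- hosts: k3s_server[0]
  become: yes
  gather_facts: no
  tasks:
    - name: Check the kube-vip daemonset
      command: "{{ bin_dir | default('/usr/local/bin') }}/k3s kubectl -n kube-system get daemonset kube-vip-ds"
      changed_when: false

    - name: Wait for the apiserver to answer on the VIP
      wait_for:
        host: "{{ ha_cluster_vip }}"
        port: 6443
        timeout: 60
```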
+ diff --git a/roles/reset/tasks/cluster-method/kube-vip.yml b/roles/reset/tasks/cluster-method/kube-vip.yml new file mode 100644 index 000000000..90b31e2a2 --- /dev/null +++ b/roles/reset/tasks/cluster-method/kube-vip.yml @@ -0,0 +1,10 @@ +--- +# Recommended in https://kube-vip.io/usage/k3s/ +# See "Clean Environment". +- name: Reset local interface + command: "{{ item }}" + with_items: + - ip addr flush dev lo + - ip addr add 127.0.0.1/8 dev lo + failed_when: false + diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 9befced69..cecaf014f 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -139,6 +139,14 @@ # # Should this be done rather than focusing on a discrete list of services (k3s_services)? +# +# Perform HA cluster-method-specific tasks +# +- name: Remove HA cluster method + include_tasks: "cluster-method/{{ ha_cluster_method }}.yml" + when: + - ha_enabled + # # for cmd in kubectl crictl ctr; do # if [ -L {{ bin_dir }}/$cmd ]; then From 2968d284e880252b7a7b095d0896632efd1cda70 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 00:54:26 -0500 Subject: [PATCH 062/108] Some fixes in inventory/sample/group_vars/README.md Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/README.md | 86 ++++++++++++++------------- 1 file changed, 45 insertions(+), 41 deletions(-) diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md index 30a2fa25f..36055d45b 100644 --- a/inventory/sample/group_vars/README.md +++ b/inventory/sample/group_vars/README.md @@ -2,52 +2,58 @@ ## Introduction `inventory/x/group_vars/all.yml` is meant to be modified appropriately for your environment. -If you are familiar with installing **k3s** from [https://get.k3s.io/](https://get.k3s.io/), -some install flags have been implemented here. -ansible variables that were previously here have moved to `playbook/group_vars/all.yml`. -Those variables are used within the playbooks and roles are not meant to be changed by a user of **k3s-ansible**. +*ansible* variables that were previously here have moved to `playbook/group_vars/all.yml`. +Those variables are used within the playbooks and roles are not meant to be changed by a user of *k3s-ansible*. When adding a new _install_ variable, a corresponding variable is added to `playbook/group_vars/all.yml` -which is then used throughout **k3s-ansible**. +which is then used throughout *k3s-ansible*. -## General flags +## General Flags -- **ansible_user**: specifies the username that has SSH password-less access to configure your hosts. +- **ansible_user**: specifies the username that has *ssh* password-less access to configure your hosts. The default is `debian`. -- **cluster_config**: specifies the location of where to capture the kube config of the new cluster. +- **cluster_config**: specifies the location of where to capture the kube configuration file for the new cluster. The default is `playbook/cluster.conf`. ## High-Availability (HA) Flags -- **ha_enabled**: specifies if the cluster will have an HA embedded database using **etcd**. +- **ha_enabled**: specifies if the cluster will have an HA embedded database using *etcd*. The default is `false`. - **ha_cluster_vip**: specifies the virtual IP (VIP) address in front of the control-plane servers for -agent configuration as well as cluster definition in .kube/config. +agent configuration as well as cluster definition in `.kube/config`. Note: This is an IP address different than those of the cluster nodes. 
Today, this is a static IP address provided in this file. It is possible to get an IP address dynamically but that is not implemented here. - **ha_cluster_method**: specifies the method of clustering to use for the virtual IP. The methods implemented today are: -1. **external** - requires a load-balancer external to the cluster -2. **kube-vip** - [https://kube-vip.io](https://kube-vip.io), arp-based daemonset using leader election + 1. `external` - requires a load-balancer external to the cluster + 2. `kube-vip` - [https://kube-vip.io](https://kube-vip.io), arp-based daemonset using leader election -Other load-balancing options are available (e.g., **keepalived**) but are not implemented here (yet). +Other load-balancing options are available (e.g., *keepalived*) but are not implemented here (yet). -### Flags that control the version of k3s downloaded +## Install Flags -There are four (4) flags that control which version of **k3s** is installed on your hosts. +If you have installed *k3s* from [https://get.k3s.io](https://get.k3s.io), these flags will be familiar. +*Install* flags are meant to duplicate the features found in the install script +(see [Installation Options](https://rancher.com/docs/k3s/latest/en/installation/install-options/#options-for-installation-with-script)). +Each flag has a prefix of `install_` and implements, to the extent possible, the actions of the shell script as documented below. +Note: not all *install* flags have been implemented. -- **install_k3s_commit**: specifies the commit of **k3s** to download from temporary cloud storage. +### Flags that control the version of *k3s* downloaded + +There are four (4) flags that control which version of *k3s* is installed on your hosts. + +- **install_k3s_commit**: specifies the commit of *k3s* to download from temporary cloud storage. The default is to leave this `undefined` as this flag is for developers and QA use. -- **install_k3s_version**: specifies the version of **k3s** to download from Github. -If undefined (the default), ansible will attempt to download from a channel. +- **install_k3s_version**: specifies the version of *k3s* to download from Github. +If left `undefined` (the default), *ansible* will attempt to download from a channel. - **install_k3s_channel_url**: specifies the URL for the channels. -The default is [https://update.k3s.io/v1-release/channels](https://update.k3s.io/v1-release/channels) +The default is [https://update.k3s.io/v1-release/channels](https://update.k3s.io/v1-release/channels). It is not something typically changed but is implemented for completeness sake. - **install_k3s_channel**: specifies the channel from which to get the version. @@ -57,42 +63,40 @@ The default is the `stable` channel. A typical channel used is `latest`. There are three (3) flags that change the default location of files. -- **install_k3s_bin_dir**: specifies the directory to install the **k3s** binary and links. +- **install_k3s_bin_dir**: specifies the directory to install the *k3s* binary and links. The default is `/usr/local/bin`. -- **install_k3s_systemd_dir**: specifies the directory to install **systemd** +- **install_k3s_systemd_dir**: specifies the directory to install *systemd* service and environment files. The default is `/etc/systemd/system`. -- **install_k3s_data_dir**: specifies the data director for the **k3s** service. -This defaults to `/var/lib/rancher/k3s` and is not (yet) a flag in **k3s-io/k3s**. 
- -### Flags for the k3s executable - -The install script from [https://get.k3s.io/](https://get.k3s.io/) has one flag to -provide extra arguments to the **k3s** executable. **k3s-ansible** uses two flags, -one for the server and one for the agent(s). These are: +- **install_k3s_data_dir**: specifies the data directory for the *k3s* service. +This defaults to `/var/lib/rancher/k3s`. +Note: this is not (yet) a flag in *k3s-io/k3s*. -- **install_k3s_server_args**: Default is ''. -- **install_k3s_agent_args**: Default is ''. +### Flags for the *k3s* executable +The install script from [https://get.k3s.io/](https://get.k3s.io/) has one flag (**install_k3s_exec**) to +provide extra arguments to the *k3s* executable. *k3s-ansible* uses two flags: +one for servers and one for agents. These are: -## Other flags that were considered from [https://get.k3s.io/](https://get.k3s.io/) +- **install_k3s_server_args**: the default is `''`. +- **install_k3s_agent_args**: the default is `''`. ### Flags not yet implemented The flags that have yet to be implemented are: -- install_k3s_skip_selinux_rpm: If set to true, ansible will skip automatic installation of the **k3s** RPM. -- install_k3s_selinux_warn: If set to true, ansible will continue if the **k3s-selinux** policy is not found. -- install_k3s_name: specifies the name of systemd service to create. -- install_k3s_type: specifies the type of systemd service to create. +- **install_k3s_skip_selinux_rpm**: If set to true, *ansible* will skip automatic installation of the *k3s* RPM. +- **install_k3s_selinux_warn**: If set to true, *ansible* will continue if the *k3s-selinux* policy is not found. +- **install_k3s_name**: specifies the name of *systemd* service to create. +- **install_k3s_type**: specifies the type of *systemd* service to create. ### Flags that will not be implemented -Lastly, some flags did not make sense to implement with **k3s-ansible**: +Lastly, some flags did not make sense to implement with *k3s-ansible*: -- install_k3s_skip_download: k3s-ansible always downloads the **k3s** binary and its hash. -- install_k3s_force_restart: k3s-ansible always restarts the service. -- install_k3s_skip_enable: k3s-ansible always enables the service. -- install_k3s_skip_start: k3s-ansible always starts the service. +- **install_k3s_skip_download**: *k3s-ansible* always downloads the *k3s* binary and its hash. +- **install_k3s_force_restart**: *k3s-ansible* always restarts the service. +- **install_k3s_skip_enable**: *k3s-ansible* always enables the service. +- **install_k3s_skip_start**: *k3s-ansible* always starts the service. From b702f621577c9b5af3885f5440cb68d6b85f3811 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 11:26:26 -0500 Subject: [PATCH 063/108] Added reference to inventory/sample/group_vars/README.md Signed-off-by: Jon S. Stumpf --- README.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 391efc713..41e1fe52d 100644 --- a/README.md +++ b/README.md @@ -44,6 +44,7 @@ k3s_agent ``` Third, edit `inventory/my-cluster/group_vars/all.yml` to best match your environment. +See, `inventory/sample/group_vars/README.md` for more details. Start provisioning of the cluster using the following command: @@ -62,8 +63,9 @@ kubectl --kubeconfig playbook/cluster.conf ... ## High Availability If you enable high availability (`ha_enabled`), the playbook will setup an embedded database using **etcd**. 
High availability requires at least k3s version **v1.19.5+k3s1** and an odd number of servers (minimum of three). -See [https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/). +See the [HA-embedded documentation](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/) for more details. -HA expects that there is a cluster virtual IP (`ha_cluster_vip`) in front of the control-plane servers. -Currently, the only supported method is to use a virtual IP, external to the cluster. +HA expects that there is a virtual IP (`ha_cluster_vip`) in front of the control-plane servers. +A few methods have been implemented to provide and manage this VIP. +See `inventory/sample/group_vars/README.md` for more details. From 86e20ae809d8fd7d2ba2eec15d664ebfe6d6ecd5 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 01:07:19 -0500 Subject: [PATCH 064/108] Added comments to playbook/site.yml Signed-off-by: Jon S. Stumpf --- playbook/site.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/playbook/site.yml b/playbook/site.yml index d2dc9b011..e19ebb801 100644 --- a/playbook/site.yml +++ b/playbook/site.yml @@ -1,5 +1,6 @@ --- +# Get hosts ready for k3s installation - hosts: k3s_cluster gather_facts: yes become: yes @@ -10,11 +11,13 @@ - role: download - role: raspberrypi +# Install the k3s servers - hosts: k3s_server become: yes roles: - role: k3s/server +# Install the k3s agents - hosts: k3s_agent become: yes roles: From c139d7679d3f46b9c6e28e2890ed327351677bd2 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 01:20:49 -0500 Subject: [PATCH 065/108] Added wait for control-plane before configuring agents Signed-off-by: Jon S. Stumpf --- playbook/site.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/playbook/site.yml b/playbook/site.yml index e19ebb801..58c2d81e8 100644 --- a/playbook/site.yml +++ b/playbook/site.yml @@ -17,6 +17,16 @@ roles: - role: k3s/server +# Wait for control-plane before setting up agents +- hosts: 127.0.0.1 + connection: local + tasks: + - name: Wait for control-plane at {{ apiserver_endpoint }}:6443 + wait_for: + host: "{{ apiserver_endpoint }}" + port: "6443" + timeout: 10 + # Install the k3s agents - hosts: k3s_agent become: yes From b59c37da5de8f480ee73dd73f1b46dbda4110f3a Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 11:53:16 -0500 Subject: [PATCH 066/108] Updated text in README.md Signed-off-by: Jon S. Stumpf --- README.md | 41 +++++++++-------- inventory/sample/group_vars/README.md | 65 ++++++++++++++------------- 2 files changed, 55 insertions(+), 51 deletions(-) diff --git a/README.md b/README.md index 41e1fe52d..e32e50f9e 100644 --- a/README.md +++ b/README.md @@ -1,35 +1,33 @@ -# Build a Kubernetes cluster using k3s via Ansible +# Build a Kubernetes cluster using *k3s* with *ansible* Author: -## K3s Ansible Playbook +## Introduction to *k3s-ansible* -Build a Kubernetes cluster using Ansible with k3s. The goal is easily install a Kubernetes cluster on machines running: +The goal of *k3s-ansible* is to easily install a Kubernetes cluster on a variety of operating systems running on machines with different architectures. +The intention is to support what *k3s* supports. Here is what has been tested (:heavy_check_mark:) with *k3s-ansible*. 
-- [X] Debian -- [X] Ubuntu -- [X] CentOS - -on processor architecture: - -- [X] x64 -- [X] arm64 -- [X] armhf +| Operating System | amd64 | arm64 | armhf | +| :--------------- | :---: | :---: | :---: | +| Debian | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| Ubuntu | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | +| CentOS | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | ## System requirements -Deployment environment must have Ansible 2.4.0+. -Server and agents must have passwordless SSH access. +- The deployment environment must have *ansible* v2.4.0+. +- Hosts in the cluster must have password-less *ssh* access. ## Usage -First create a new directory based on the `sample` directory within the `inventory` directory: +1. Create a new cluster definition based on the `inventory/sample` directory. ```bash cp -R inventory/sample inventory/my-cluster ``` -Second, edit `inventory/my-cluster/hosts.ini` to match the system information gathered above. For example: +2. Edit `inventory/my-cluster/hosts.ini` to include the hosts that will make up your new cluster. +For example: ```bash [k3s_server] @@ -43,10 +41,10 @@ k3s_server k3s_agent ``` -Third, edit `inventory/my-cluster/group_vars/all.yml` to best match your environment. +3. Edit `inventory/my-cluster/group_vars/all.yml` to best match your environment. See, `inventory/sample/group_vars/README.md` for more details. -Start provisioning of the cluster using the following command: +4. Provision your new cluster. ```bash ansible-playbook playbook/site.yml -i inventory/my-cluster/hosts.ini @@ -61,11 +59,12 @@ kubectl --kubeconfig playbook/cluster.conf ... ``` ## High Availability -If you enable high availability (`ha_enabled`), the playbook will setup an embedded database using **etcd**. -High availability requires at least k3s version **v1.19.5+k3s1** and an odd number of servers (minimum of three). +*k3s-ansible* can now configure a high-availability (HA) cluster. +If you enable HA (**ha_enabled**), the playbook will setup an embedded database using *etcd*. +HA requires at least version **v1.19.5+k3s1** and an odd number of servers (minimum of three). See the [HA-embedded documentation](https://rancher.com/docs/k3s/latest/en/installation/ha-embedded/) for more details. -HA expects that there is a virtual IP (`ha_cluster_vip`) in front of the control-plane servers. +HA expects that there is a virtual IP (**ha_cluster_vip**) in front of the *control-plane* servers. A few methods have been implemented to provide and manage this VIP. See `inventory/sample/group_vars/README.md` for more details. diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md index 36055d45b..45db31254 100644 --- a/inventory/sample/group_vars/README.md +++ b/inventory/sample/group_vars/README.md @@ -8,7 +8,7 @@ Those variables are used within the playbooks and roles are not meant to be chan When adding a new _install_ variable, a corresponding variable is added to `playbook/group_vars/all.yml` which is then used throughout *k3s-ansible*. -## General Flags +## General Variables - **ansible_user**: specifies the username that has *ssh* password-less access to configure your hosts. The default is `debian`. @@ -16,7 +16,7 @@ The default is `debian`. - **cluster_config**: specifies the location of where to capture the kube configuration file for the new cluster. The default is `playbook/cluster.conf`. 
-## High-Availability (HA) Flags +## High-Availability (HA) Variables - **ha_enabled**: specifies if the cluster will have an HA embedded database using *etcd*. The default is `false`. @@ -29,25 +29,24 @@ It is possible to get an IP address dynamically but that is not implemented here - **ha_cluster_method**: specifies the method of clustering to use for the virtual IP. The methods implemented today are: - 1. `external` - requires a load-balancer external to the cluster - 2. `kube-vip` - [https://kube-vip.io](https://kube-vip.io), arp-based daemonset using leader election + 1. `external` - requires a load-balancer external to the cluster + 2. `kube-vip` - [https://kube-vip.io](https://kube-vip.io), arp-based daemonset using leader election Other load-balancing options are available (e.g., *keepalived*) but are not implemented here (yet). -## Install Flags +## Install Variables -If you have installed *k3s* from [https://get.k3s.io](https://get.k3s.io), these flags will be familiar. -*Install* flags are meant to duplicate the features found in the install script +If you have installed *k3s* from [https://get.k3s.io](https://get.k3s.io), these variables will be familiar. +*Install* variables are meant to duplicate the install flags and environment variables found in the install script (see [Installation Options](https://rancher.com/docs/k3s/latest/en/installation/install-options/#options-for-installation-with-script)). -Each flag has a prefix of `install_` and implements, to the extent possible, the actions of the shell script as documented below. -Note: not all *install* flags have been implemented. +Each variable has a prefix of `install_` and implements, to the extent possible, the actions of the shell script as documented below. -### Flags that control the version of *k3s* downloaded +### Variables that control the version of *k3s* downloaded -There are four (4) flags that control which version of *k3s* is installed on your hosts. +There are four (4) variables that control which version of *k3s* is installed on your hosts. - **install_k3s_commit**: specifies the commit of *k3s* to download from temporary cloud storage. -The default is to leave this `undefined` as this flag is for developers and QA use. +The default is to leave this `undefined` as this variable is for developers and QA use. - **install_k3s_version**: specifies the version of *k3s* to download from Github. If left `undefined` (the default), *ansible* will attempt to download from a channel. @@ -59,9 +58,9 @@ It is not something typically changed but is implemented for completeness sake. - **install_k3s_channel**: specifies the channel from which to get the version. The default is the `stable` channel. A typical channel used is `latest`. -### Flags that change the location of binaries and data +### Variables that change the location of binaries and data -There are three (3) flags that change the default location of files. +There are three (3) variables that change the default location of files. - **install_k3s_bin_dir**: specifies the directory to install the *k3s* binary and links. The default is `/usr/local/bin`. @@ -71,32 +70,38 @@ service and environment files. The default is `/etc/systemd/system`. - **install_k3s_data_dir**: specifies the data directory for the *k3s* service. This defaults to `/var/lib/rancher/k3s`. -Note: this is not (yet) a flag in *k3s-io/k3s*. +Note: this is not (yet) an option in *k3s-io/k3s*. 
-### Flags for the *k3s* executable +### Variables for the *k3s* executable -The install script from [https://get.k3s.io/](https://get.k3s.io/) has one flag (**install_k3s_exec**) to -provide extra arguments to the *k3s* executable. *k3s-ansible* uses two flags: +The install script from [https://get.k3s.io/](https://get.k3s.io/) has one flag (**INSTALL_K3S_EXEC**) to +provide extra arguments to the *k3s* executable. *k3s-ansible* uses two variables: one for servers and one for agents. These are: - **install_k3s_server_args**: the default is `''`. - **install_k3s_agent_args**: the default is `''`. -### Flags not yet implemented +### Install Flags not yet implemented -The flags that have yet to be implemented are: +The install flags that have yet to be implemented are: -- **install_k3s_skip_selinux_rpm**: If set to true, *ansible* will skip automatic installation of the *k3s* RPM. -- **install_k3s_selinux_warn**: If set to true, *ansible* will continue if the *k3s-selinux* policy is not found. -- **install_k3s_name**: specifies the name of *systemd* service to create. -- **install_k3s_type**: specifies the type of *systemd* service to create. +| Install Flag | What it does | +| :--- | :--- | +| **INSTALL_K3S_SKIP_SELINUX_RPM** | If set to true, *ansible* will skip automatic installation of the *k3s* RPM. +| **INSTALL_K3S_SELINUX_WARN** | If set to true, *ansible* will continue if the *k3s-selinux* policy is not found. +| **INSTALL_K3S_NAME** | specifies the name of *systemd* service to create. +| **INSTALL_K3S_TYPE** | specifies the type of *systemd* service to create. -### Flags that will not be implemented +Currently, nothing will happen if these are set. -Lastly, some flags did not make sense to implement with *k3s-ansible*: +### Install Flags that will not be implemented -- **install_k3s_skip_download**: *k3s-ansible* always downloads the *k3s* binary and its hash. -- **install_k3s_force_restart**: *k3s-ansible* always restarts the service. -- **install_k3s_skip_enable**: *k3s-ansible* always enables the service. -- **install_k3s_skip_start**: *k3s-ansible* always starts the service. +Lastly, some install flags did not make sense to implement with *k3s-ansible*: + +| Install Flag | What *k3s-ansible* does | +| :--- | :--- | +| **INSTALL_K3S_SKIP_DOWNLOAD** | *k3s-ansible* always downloads the *k3s* binary and its hash. | +| **INSTALL_K3S_FORCE_RESTART** | *k3s-ansible* always restarts the service. | +| **INSTALL_K3S_SKIP_ENABLE** | *k3s-ansible* always enables the service. | +| **INSTALL_K3S_SKIP_START** | *k3s-ansible* always starts the service. | From f42d11eded93adc8950a84ab409240415a669740 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 17:47:26 -0500 Subject: [PATCH 067/108] Handled the case of an empty group_vars/all.yml file Signed-off-by: Jon S. 
Stumpf --- playbook/group_vars/all.yml | 3 +-- roles/k3s/server/defaults/main.yml | 5 ++++- roles/reset/defaults/main.yml | 2 ++ roles/reset/tasks/main.yml | 1 + 4 files changed, 8 insertions(+), 3 deletions(-) diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index 91c53495d..cca23211e 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -16,8 +16,7 @@ bin_dir: "{{ install_k3s_bin_dir | default('/usr/local/bin') }}" data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" first_server: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" -apiserver_endpoint: "{{ ha_cluster_vip if ha_enabled else first_server }}" - +apiserver_endpoint: "{{ (ha_cluster_vip | mandatory) if (ha_enabled | default(false)) else first_server }}" k3s_token: "MySuperSecureToken" diff --git a/roles/k3s/server/defaults/main.yml b/roles/k3s/server/defaults/main.yml index b97dc56d7..324918e81 100644 --- a/roles/k3s/server/defaults/main.yml +++ b/roles/k3s/server/defaults/main.yml @@ -1,5 +1,8 @@ --- -ansible_user: root +ansible_user: debian +cluster_config: cluster.conf +ha_enabled: false + server_init_args: >- {% if ha_enabled %} {% if ansible_host == first_server %} diff --git a/roles/reset/defaults/main.yml b/roles/reset/defaults/main.yml index 82b7ba800..b3843ae96 100644 --- a/roles/reset/defaults/main.yml +++ b/roles/reset/defaults/main.yml @@ -1,4 +1,6 @@ --- +ansible_user: debian +ha_enabled: false # Changing this flag will keep the downloaded binaries after a reset keep_binaries: false diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index cecaf014f..4070917f0 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -146,6 +146,7 @@ include_tasks: "cluster-method/{{ ha_cluster_method }}.yml" when: - ha_enabled + - ha_cluster_method is defined # # for cmd in kubectl crictl ctr; do From 494f36f15a9feccb6d158242f8ac0c8858d242e0 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 18:33:28 -0500 Subject: [PATCH 068/108] Create simpler inventory/sample/group_vars/all.yml Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/all.yml | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/inventory/sample/group_vars/all.yml b/inventory/sample/group_vars/all.yml index ef8a8ea28..12191abef 100644 --- a/inventory/sample/group_vars/all.yml +++ b/inventory/sample/group_vars/all.yml @@ -1,23 +1,14 @@ --- +# See inventory/sample/group_vars/README.md for more options. +# If this file is empty, default values will be used for all mandatory fields. -# See README.md for more options - -# This is the user that has SSH password-less access to configure your hosts +# The user that has password-less ssh access to configure your hosts ansible_user: debian # The location of where to capture the kube config of the new cluster +# Relative paths are relative to the playbook directory. cluster_config: cluster.conf -# Set up this cluster with an HA embedded database using etcd -ha_enabled: false - -ha_cluster_vip: 192.168.1.254 -ha_cluster_method: external - -# Use the latest version +# Use the latest k3s version instead of 'stable' install_k3s_channel: 'latest' -# Flags for the k3s executable -install_k3s_server_args: '' -install_k3s_agent_args: '' - From 1b5278fde9a92917f127337644b223d231f8c76f Mon Sep 17 00:00:00 2001 From: "Jon S. 
Stumpf" Date: Mon, 27 Dec 2021 20:26:59 -0500 Subject: [PATCH 069/108] Added configuration checks for 'ha_cluster_vip' and 'ha_cluster_method' Signed-off-by: Jon S. Stumpf --- playbook/group_vars/all.yml | 5 +++++ roles/config-check/tasks/main.yml | 21 +++++++++++++++++++++ 2 files changed, 26 insertions(+) diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index cca23211e..6809de7e5 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -29,3 +29,8 @@ k3s_service_file_extensions: - service - service.env +# Supported HA Cluster Methods +k3s_cluster_methods: + - external + - kube-vip + diff --git a/roles/config-check/tasks/main.yml b/roles/config-check/tasks/main.yml index 5114abc94..672f2f039 100644 --- a/roles/config-check/tasks/main.yml +++ b/roles/config-check/tasks/main.yml @@ -21,6 +21,27 @@ - ha_enabled - groups['k3s_server'] | length is divisibleby 2 +- name: Check for a proper HA cluster virtual IP + fail: + msg: "When HA is enabled, 'ha_cluster_vip' must have a valid IP address." + when: + - ha_enabled + - (ha_cluster_vip is not defined) or not (ha_cluster_vip | ansible.netcommon.ipaddr) + +- name: Check that ha_cluster_method is defined + fail: + msg: "When HA is enabled, 'ha_cluster_method' must be defined." + when: + - ha_enabled + - ha_cluster_method is not defined + +- name: Check for a proper HA cluster method + fail: + msg: "'{{ ha_cluster_method }}' is not a supported HA cluster method." + when: + - ha_enabled + - ha_cluster_method not in k3s_cluster_methods + - name: Determine version to download block: - name: Determine version from channel From 5ccaad450acc5f5d62b19b1b36d7412c7c77e614 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 20:46:48 -0500 Subject: [PATCH 070/108] Increased wait time for control-plane to 60 seconds Signed-off-by: Jon S. Stumpf --- playbook/site.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbook/site.yml b/playbook/site.yml index 58c2d81e8..e7d35e792 100644 --- a/playbook/site.yml +++ b/playbook/site.yml @@ -25,7 +25,7 @@ wait_for: host: "{{ apiserver_endpoint }}" port: "6443" - timeout: 10 + timeout: 60 # Install the k3s agents - hosts: k3s_agent From 1ed9386edf066256211c2a017accdef3068f04e4 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 20:47:37 -0500 Subject: [PATCH 071/108] Added cleanup to the install as well for kube-vip Signed-off-by: Jon S. Stumpf --- roles/k3s/server/tasks/cluster-method/kube-vip.yml | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/roles/k3s/server/tasks/cluster-method/kube-vip.yml b/roles/k3s/server/tasks/cluster-method/kube-vip.yml index 9734f8ee7..b118cd337 100644 --- a/roles/k3s/server/tasks/cluster-method/kube-vip.yml +++ b/roles/k3s/server/tasks/cluster-method/kube-vip.yml @@ -1,4 +1,13 @@ --- +# Recommended in https://kube-vip.io/usage/k3s/ +# See "Clean Environment". +- name: Reset local interface + command: "{{ item }}" + with_items: + - ip addr flush dev lo + - ip addr add 127.0.0.1/8 dev lo + failed_when: false + - name: Create manifest directory {{ ha_cluster_method }} file: path: "{{ data_dir }}/server/manifests/{{ ha_cluster_method }}" From 2394e06b8938623b0880f6a838c1d53c6d03077e Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 22:03:43 -0500 Subject: [PATCH 072/108] Remove cluster VIP from interface during reset Signed-off-by: Jon S. 
Stumpf --- roles/reset/tasks/cluster-method/kube-vip.yml | 6 ++++++ roles/reset/tasks/main.yml | 1 + 2 files changed, 7 insertions(+) diff --git a/roles/reset/tasks/cluster-method/kube-vip.yml b/roles/reset/tasks/cluster-method/kube-vip.yml index 90b31e2a2..a67c918ae 100644 --- a/roles/reset/tasks/cluster-method/kube-vip.yml +++ b/roles/reset/tasks/cluster-method/kube-vip.yml @@ -8,3 +8,9 @@ - ip addr add 127.0.0.1/8 dev lo failed_when: false +# Remove cluster VIP from interface +- name: Remove {{ ha_cluster_vip }}/32 from {{ kube_vip_interface }} + command: "ip address delete {{ ha_cluster_vip }}/32 dev {{ kube_vip_interface }}" + register: ipaddr_delete + when: ha_cluster_vip in (ansible_all_ipv4_addresses | list) + diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 4070917f0..08d9ae309 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -147,6 +147,7 @@ when: - ha_enabled - ha_cluster_method is defined + - inventory_hostname in groups['k3s_server'] # # for cmd in kubectl crictl ctr; do From dc915505feaf3e1a4eeeb36e359b8868578c9ced Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Mon, 27 Dec 2021 22:23:45 -0500 Subject: [PATCH 073/108] Eliminated unnecessary/redundant fact gathering Signed-off-by: Jon S. Stumpf --- playbook/site.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/playbook/site.yml b/playbook/site.yml index e7d35e792..4bf0da1cc 100644 --- a/playbook/site.yml +++ b/playbook/site.yml @@ -13,6 +13,7 @@ # Install the k3s servers - hosts: k3s_server + gather_facts: no become: yes roles: - role: k3s/server @@ -20,6 +21,7 @@ # Wait for control-plane before setting up agents - hosts: 127.0.0.1 connection: local + gather_facts: no tasks: - name: Wait for control-plane at {{ apiserver_endpoint }}:6443 wait_for: @@ -29,6 +31,7 @@ # Install the k3s agents - hosts: k3s_agent + gather_facts: no become: yes roles: - role: k3s/agent From c94616782d5f0a441a0eadb1ced76b0e43e23b93 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Tue, 28 Dec 2021 18:43:05 -0500 Subject: [PATCH 074/108] Renamed keep_binaries to remove_packages; Applied it to k3s-selinux package mgmt Signed-off-by: Jon S. 
Stumpf --- roles/reset/defaults/main.yml | 2 +- roles/reset/tasks/main.yml | 18 +++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/roles/reset/defaults/main.yml b/roles/reset/defaults/main.yml index b3843ae96..8d3989bd4 100644 --- a/roles/reset/defaults/main.yml +++ b/roles/reset/defaults/main.yml @@ -3,5 +3,5 @@ ansible_user: debian ha_enabled: false # Changing this flag will keep the downloaded binaries after a reset -keep_binaries: false +remove_packages: true diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 08d9ae309..59ddc46e3 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -193,7 +193,7 @@ file: path: "{{ item }}" state: absent - when: not keep_binaries + when: remove_packages loop: - "{{ bin_dir }}/k3s" @@ -207,21 +207,29 @@ yum: name: k3s-selinux state: remove - when: ansible_pkg_mgr == "yum" + when: + - ansible_pkg_mgr == "yum" + - remove_packages - name: Remove package k3s-selinux zypper: name: k3s-selinux state: remove - when: ansible_pkg_mgr == "zypper" + when: + - ansible_pkg_mgr == "zypper" + - remove_packages - name: Remove yum repo files shell: 'rm -f /etc/yum.repos.d/rancher-k3s-common*.repo' register: remove_repo_files - when: ansible_pkg_mgr == "yum" + when: + - ansible_pkg_mgr == "yum" + - remove_packages - name: Remove zypper repo files shell: 'rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo' register: remove_repo_files - when: ansible_pkg_mgr == "zypper" + when: + - ansible_pkg_mgr == "zypper" + - remove_packages From 1577aa26cfb8432708ec989718f86ac0fe553028 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 01:05:25 -0500 Subject: [PATCH 075/108] Implemented keepalived cluster method as a role Signed-off-by: Jon S. Stumpf --- roles/ha/keepalived/defaults/main.yml | 38 +++++ roles/ha/keepalived/handlers/main.yml | 9 ++ roles/ha/keepalived/tasks/main.yml | 136 ++++++++++++++++++ .../keepalived/templates/keepalived.conf.j2 | 33 +++++ .../templates/keepalived_check_k3s.sh.j2 | 21 +++ roles/reset/ha/keepalived/tasks/main.yml | 54 +++++++ 6 files changed, 291 insertions(+) create mode 100644 roles/ha/keepalived/defaults/main.yml create mode 100644 roles/ha/keepalived/handlers/main.yml create mode 100644 roles/ha/keepalived/tasks/main.yml create mode 100644 roles/ha/keepalived/templates/keepalived.conf.j2 create mode 100644 roles/ha/keepalived/templates/keepalived_check_k3s.sh.j2 create mode 100644 roles/reset/ha/keepalived/tasks/main.yml diff --git a/roles/ha/keepalived/defaults/main.yml b/roles/ha/keepalived/defaults/main.yml new file mode 100644 index 000000000..83eebd7e0 --- /dev/null +++ b/roles/ha/keepalived/defaults/main.yml @@ -0,0 +1,38 @@ +--- + +# This file is used to configure the keepalived configuration files. It typically does +# not need to be changed but you can at your discretion to suit your environment. +# These variables are used only when HA is enabled and the method chosen is keepalived. 
+ +keepalived_etc_dir: '/etc/keepalived' +keepalived_lib_dir: '/var/lib/keepalived' +keepalived_run_dir: '/var/run' +keepalived_tmp_dir: '/var/tmp' + +keepalived_kubeconfig_src: '/etc/rancher/k3s/k3s.yaml' + +keepalived_config_file: 'keepalived.conf' + +keepalived_script_name: 'keepalived_check_k3s.sh' +keepalived_script_output: 'keepalived_check_k3s.out' +keepalived_script_kubeconfig: 'kubeconfig' +keepalived_script_user: 'keepalived' +keepalived_script_group: 'nogroup' +keepalived_script_login: '/usr/sbin/nologin' + +keepalived_package_state: 'latest' + +keepalived_instance_name: 'K3S_VI_1' +keepalived_interface: "{{ ansible_default_ipv4.interface }}" +keepalived_vr_id: 51 +keepalived_master_priority: 11 +keepalived_backup_priority: 10 +keepalived_advertisement_interval: 1 +keepalived_network_mask: 24 +keepalived_password: 'MySuperSecretVRRPPassword' + +keepalived_check_interval: 2 +keepalived_check_timeout: 5 +keepalived_check_rise: 3 +keepalived_check_fall: 3 + diff --git a/roles/ha/keepalived/handlers/main.yml b/roles/ha/keepalived/handlers/main.yml new file mode 100644 index 000000000..dc77f2156 --- /dev/null +++ b/roles/ha/keepalived/handlers/main.yml @@ -0,0 +1,9 @@ +--- + +- name: Restart keepalived service + service: + name: keepalived + state: restarted + enabled: yes + listen: restart-keepalived-service + diff --git a/roles/ha/keepalived/tasks/main.yml b/roles/ha/keepalived/tasks/main.yml new file mode 100644 index 000000000..ff70ad3c7 --- /dev/null +++ b/roles/ha/keepalived/tasks/main.yml @@ -0,0 +1,136 @@ +--- + +################################ +# Install the keepalived package + +- name: Install package keepalived + package: + name: keepalived + state: "{{ keepalived_package_state }}" + notify: restart-keepalived-service + +###################################### +# Create the script user, if necessary + +- name: Create keepalived script user, {{ keepalived_script_user }} + user: + name: "{{ keepalived_script_user }}" + group: "{{ keepalived_script_group }}" + comment: "User to run keepalived scripts" + home: "{{ keepalived_lib_dir }}" + shell: "{{ keepalived_script_login }}" + append: no + create_home: no + system: yes + +################################## +# Install/Update the configuration + +- name: Create keepalived etc directory, {{ keepalived_etc_dir }} + file: + path: "{{ keepalived_etc_dir }}" + state: directory + owner: root + group: root + mode: "u=rwx,g=,o=" + +- name: Create keepalived lib directory, {{ keepalived_lib_dir }} + file: + path: "{{ keepalived_lib_dir }}" + state: directory + owner: "{{ keepalived_script_user }}" + group: "{{ keepalived_script_group }}" + mode: "u=rwx,g=,o=" + +# This is here for the case when someone changes the default +- name: Verify keepalived run directory exists, {{ keepalived_run_dir }} + register: run_dir + stat: + path: "{{ keepalived_run_dir }}/." # Use '/." to traverse a symlink, if necessary + failed_when: not (run_dir.stat.isdir is defined and run_dir.stat.isdir) + +# This is here for the case when someone changes the default +- name: Verify keepalived tmp directory exists, {{ keepalived_tmp_dir }} + register: tmp_dir + stat: + path: "{{ keepalived_tmp_dir }}/." # Use '/." 
to traverse a symlink, if necessary + failed_when: not (tmp_dir.stat.isdir is defined and tmp_dir.stat.isdir) + +- name: Create keepalived configuration, {{ path }} + vars: + path: "{{ keepalived_etc_dir }}/{{ keepalived_config_file }}" + template: + src: "keepalived.conf.j2" + dest: "{{ path }}" + owner: root + group: root + mode: "u=r,g=,o=" + notify: restart-keepalived-service + +- name: Create keepalived k3s check script, {{ path }} + vars: + path: "{{ keepalived_lib_dir }}/{{ keepalived_script_name }}" + template: + src: "keepalived_check_k3s.sh.j2" + dest: "{{ path }}" + owner: "{{ keepalived_script_user }}" + group: "{{ keepalived_script_group }}" + mode: "u=rwx,g=rx,o=rx" + +# You have to stat first because touch will ALWAYS change an existing file +- name: Check for the keepalived k3s script output file, {{ path }}" + vars: + path: "{{ keepalived_run_dir }}/{{ keepalived_script_output }}" + stat: + path: "{{ path }}" + register: output + +- name: Create keepalived k3s script output file, {{ path }} + vars: + path: "{{ keepalived_run_dir }}/{{ keepalived_script_output }}" + file: + path: "{{ path }}" + state: touch + owner: "{{ keepalived_script_user }}" + group: "{{ keepalived_script_group }}" + mode: "u=rw,g=r,o=r" + when: not output.stat.exists + +- name: Copy the kubeconfig, {{ keepalived_kubeconfig_src }}, to {{ path }} + vars: + path: "{{ keepalived_lib_dir }}/{{ keepalived_script_kubeconfig }}" + copy: + remote_src: yes + src: "{{ keepalived_kubeconfig_src }}" + dest: "{{ path }}" + owner: "{{ keepalived_script_user }}" + group: "{{ keepalived_script_group }}" + mode: "u=r,g=,o=" + +##################################### +# Ensure non-local binding is enabled + +- name: Ensure ipv4 nonlocal binding is enabled + sysctl: + name: net.ipv4.ip_nonlocal_bind + value: "1" + state: present + reload: yes + +- name: Ensure ipv6 nonlocal binding is enabled + sysctl: + name: net.ipv6.ip_nonlocal_bind + value: "1" + state: present + reload: yes + +################### +# Start the service + +- name: Start keepalived service + register: started + service: + name: keepalived + enabled: yes + state: started + diff --git a/roles/ha/keepalived/templates/keepalived.conf.j2 b/roles/ha/keepalived/templates/keepalived.conf.j2 new file mode 100644 index 000000000..c7dd09dc8 --- /dev/null +++ b/roles/ha/keepalived/templates/keepalived.conf.j2 @@ -0,0 +1,33 @@ + +vrrp_script k3s_check { + script "{{ keepalived_lib_dir }}/{{ keepalived_script_name }}" + user {{ keepalived_script_user }} + interval {{ keepalived_check_interval }} + timeout {{ keepalived_check_timeout }} + rise {{ keepalived_check_rise }} + fall {{ keepalived_check_fall }} +} + +vrrp_instance {{ keepalived_instance_name }} { +{% if ansible_host == first_server %} + state MASTER + priority {{ keepalived_master_priority }} +{% else %} + state BACKUP + priority {{ keepalived_backup_priority }} +{% endif %} + interface {{ keepalived_interface }} + virtual_router_id {{ keepalived_vr_id }} + advert_int {{ keepalived_advertisement_interval }} + authentication { + auth_type PASS + auth_pass {{ keepalived_password }} + } + virtual_ipaddress { + {{ ha_cluster_vip }}/{{ keepalived_network_mask }} + } + track_script { + k3s_check + } +} + diff --git a/roles/ha/keepalived/templates/keepalived_check_k3s.sh.j2 b/roles/ha/keepalived/templates/keepalived_check_k3s.sh.j2 new file mode 100644 index 000000000..3b0b91a13 --- /dev/null +++ b/roles/ha/keepalived/templates/keepalived_check_k3s.sh.j2 @@ -0,0 +1,21 @@ +#!/bin/bash + +OUTPUT_FILE="{{ 
keepalived_run_dir }}/{{ keepalived_script_output }}" +TMP_FILE="{{ keepalived_tmp_dir }}/{{ keepalived_script_output }}-${$}" + +export KUBECONFIG="{{ keepalived_lib_dir }}/{{ keepalived_script_kubeconfig }}" + +trap 'rm -f "${TMP_FILE}"' EXIT + +{{ bin_dir }}/k3s kubectl get --raw='/livez?verbose' 1> "${TMP_FILE}" + +if [ "${?}" != 0 ] +then + exit 1 +fi + +date 1>> "${TMP_FILE}" + +# Don't assume mv will work +cp "${TMP_FILE}" "${OUTPUT_FILE}" + diff --git a/roles/reset/ha/keepalived/tasks/main.yml b/roles/reset/ha/keepalived/tasks/main.yml new file mode 100644 index 000000000..2f46f337e --- /dev/null +++ b/roles/reset/ha/keepalived/tasks/main.yml @@ -0,0 +1,54 @@ +--- + +- name: Include vars + include_vars: "../roles/ha/keepalived/defaults/main.yml" + +################### +# Stop the service + +- name: Stop keepalived service + service: + name: keepalived + enabled: no + state: stopped + +##################################################### +# Make sure the cluster VIP is removed from interface +# Note: keepalived does this for us but just in case + +- name: Remove {{ ha_cluster_vip }}/{{ keepalived_network_mask }} from {{ keepalived_interface }} + command: "ip address delete {{ ha_cluster_vip }}/{{ keepalived_network_mask }} dev {{ keepalived_interface }}" + register: ipaddr_delete + when: ha_cluster_vip in (ansible_all_ipv4_addresses | list) + +################################## +# Delete the configuration + +- name: Delete the keepalived configuration + file: + path: "{{ item }}" + state: absent + loop: + - "{{ keepalived_etc_dir }}" + - "{{ keepalived_lib_dir }}" + - "{{ keepalived_run_dir }}/{{ keepalived_script_output }}" +# - "{{ keepalived_etc_dir }}/{{ keepalived_config_file }}" +# - "{{ keepalived_lib_dir }}/{{ keepalived_script_name }}" +# - "{{ keepalived_lib_dir }}/{{ keepalived_script_kubeconfig }}" + +- name: Remove keepalived script user, {{ keepalived_script_user }} + user: + name: "{{ keepalived_script_user }}" + state: absent + remove: yes + force: yes + +################################## +# Uninstall the keepalived package + +- name: Uninstall package keepalived + package: + name: keepalived + state: absent + when: remove_packages | default(true) | bool + From 5041688aa86d58a81cfb7a61f10b6de09f821997 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 01:14:53 -0500 Subject: [PATCH 076/108] Turned kube-vip cluster method into roles Signed-off-by: Jon S. 
Stumpf --- .../kube-vip.yml => roles/ha/kube-vip/defaults/main.yml | 0 .../cluster-method/kube-vip.yml => ha/kube-vip/tasks/main.yml} | 0 .../kube-vip => ha/kube-vip/templates}/kube-vip-arp-ds.yaml.j2 | 0 .../kube-vip => ha/kube-vip/templates}/kube-vip-rbac.yaml.j2 | 0 .../cluster-method/kube-vip.yml => ha/kube-vip/tasks/main.yml} | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename inventory/sample/group_vars/k3s_server/kube-vip.yml => roles/ha/kube-vip/defaults/main.yml (100%) rename roles/{k3s/server/tasks/cluster-method/kube-vip.yml => ha/kube-vip/tasks/main.yml} (100%) rename roles/{k3s/server/templates/cluster-method/kube-vip => ha/kube-vip/templates}/kube-vip-arp-ds.yaml.j2 (100%) rename roles/{k3s/server/templates/cluster-method/kube-vip => ha/kube-vip/templates}/kube-vip-rbac.yaml.j2 (100%) rename roles/reset/{tasks/cluster-method/kube-vip.yml => ha/kube-vip/tasks/main.yml} (100%) diff --git a/inventory/sample/group_vars/k3s_server/kube-vip.yml b/roles/ha/kube-vip/defaults/main.yml similarity index 100% rename from inventory/sample/group_vars/k3s_server/kube-vip.yml rename to roles/ha/kube-vip/defaults/main.yml diff --git a/roles/k3s/server/tasks/cluster-method/kube-vip.yml b/roles/ha/kube-vip/tasks/main.yml similarity index 100% rename from roles/k3s/server/tasks/cluster-method/kube-vip.yml rename to roles/ha/kube-vip/tasks/main.yml diff --git a/roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-arp-ds.yaml.j2 b/roles/ha/kube-vip/templates/kube-vip-arp-ds.yaml.j2 similarity index 100% rename from roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-arp-ds.yaml.j2 rename to roles/ha/kube-vip/templates/kube-vip-arp-ds.yaml.j2 diff --git a/roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-rbac.yaml.j2 b/roles/ha/kube-vip/templates/kube-vip-rbac.yaml.j2 similarity index 100% rename from roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-rbac.yaml.j2 rename to roles/ha/kube-vip/templates/kube-vip-rbac.yaml.j2 diff --git a/roles/reset/tasks/cluster-method/kube-vip.yml b/roles/reset/ha/kube-vip/tasks/main.yml similarity index 100% rename from roles/reset/tasks/cluster-method/kube-vip.yml rename to roles/reset/ha/kube-vip/tasks/main.yml From 50d28e5afa229f4755756abe1e27cf3605806192 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 14:10:36 -0500 Subject: [PATCH 077/108] Converted kube-vip cluster method to a role Signed-off-by: Jon S. Stumpf --- roles/ha/kube-vip/defaults/main.yml | 6 +-- roles/ha/kube-vip/tasks/main.yml | 23 +++++++-- .../server/tasks/cluster-method/external.yml | 2 - roles/k3s/server/tasks/main.yml | 5 +- roles/reset/ha/kube-vip/tasks/main.yml | 49 ++++++++++++++++--- roles/reset/tasks/cluster-method/external.yml | 3 -- 6 files changed, 67 insertions(+), 21 deletions(-) delete mode 100644 roles/k3s/server/tasks/cluster-method/external.yml delete mode 100644 roles/reset/tasks/cluster-method/external.yml diff --git a/roles/ha/kube-vip/defaults/main.yml b/roles/ha/kube-vip/defaults/main.yml index 31cc2eb43..a8c596bac 100644 --- a/roles/ha/kube-vip/defaults/main.yml +++ b/roles/ha/kube-vip/defaults/main.yml @@ -3,10 +3,11 @@ # This file is used to configure the kube-vip manifest files. It typically does # not need to be changed but you can at your discretion to suit your environment. # These variables are used only when HA is enabled and the method chosen is kube-vip. +# More options can be found in ../templates//kube-vip-arp-ds.yaml.j2 # These are commonly changed values for kube-vip. 
kube_vip_version: 'v0.4.0' -kube_vip_interface: 'eth0' +kube_vip_interface: "{{ ansible_default_ipv4.interface }}" # These are less commonly changed values for kube-vip. kube_vip_cidr: '32' @@ -21,6 +22,3 @@ kube_vip_serviceaccount_name: 'kube-vip' kube_vip_clusterrole_name: 'kube-vip-role' kube_vip_clusterrolebinding_name: 'kube-vip-binding' -# More options can be found in ... -# roles/k3s/server/templates/cluster-method/kube-vip/kube-vip-arp-ds.yaml.j2 - diff --git a/roles/ha/kube-vip/tasks/main.yml b/roles/ha/kube-vip/tasks/main.yml index b118cd337..e51fdf4d6 100644 --- a/roles/ha/kube-vip/tasks/main.yml +++ b/roles/ha/kube-vip/tasks/main.yml @@ -1,25 +1,36 @@ --- + +############################################### # Recommended in https://kube-vip.io/usage/k3s/ # See "Clean Environment". + - name: Reset local interface command: "{{ item }}" with_items: - ip addr flush dev lo - ip addr add 127.0.0.1/8 dev lo failed_when: false + changed_when: false -- name: Create manifest directory {{ ha_cluster_method }} - file: +####################################################### +# Deploy kube-vip manifests +# TODO: Figure out how to do this using kubernetes.core + +- name: Create manifest directory {{ path }} + vars: path: "{{ data_dir }}/server/manifests/{{ ha_cluster_method }}" + file: + path: "{{ path }}" state: directory owner: root group: root mode: "u=rwx,g=,o=" register: manifest_dir +# TODO: Should we download this instead? (See Step 2, https://kube-vip.chipzoller.dev/docs/usage/k3s/) - name: Copy RBAC manifest template: - src: "cluster-method/{{ ha_cluster_method }}/kube-vip-rbac.yaml.j2" + src: "kube-vip-rbac.yaml.j2" dest: "{{ manifest_dir.path }}/kube-vip-rbac.yaml" owner: root group: root @@ -27,9 +38,13 @@ - name: Copy arp daemonset manifest template: - src: "cluster-method/{{ ha_cluster_method }}/kube-vip-arp-ds.yaml.j2" + src: "kube-vip-arp-ds.yaml.j2" dest: "{{ manifest_dir.path }}/kube-vip-arp-ds.yaml" owner: root group: root mode: "u=rw,g=,o=" +- name: Apply kube-vip manifests + command: "{{ bin_dir }}/k3s kubectl apply -f {{ manifest_dir.path }}" + run_once: true + diff --git a/roles/k3s/server/tasks/cluster-method/external.yml b/roles/k3s/server/tasks/cluster-method/external.yml deleted file mode 100644 index 55d3cbcc7..000000000 --- a/roles/k3s/server/tasks/cluster-method/external.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -# Nothing (at the moment) to do for an external load-balancer. 
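One editorial aside on the apply step above: because the manifests are applied with `k3s kubectl` from a single host (`run_once`), a follow-up check could confirm that kube-vip actually rolled out before later plays rely on the VIP. A hedged sketch, assuming the templates create a daemonset named `kube-vip` in `kube-system` (adjust to whatever the RBAC/daemonset templates actually define):

```yaml
- name: Wait for the kube-vip daemonset to roll out  # illustrative check, not part of the patch
  command: >-
    {{ bin_dir }}/k3s kubectl -n kube-system
    rollout status daemonset/kube-vip --timeout=120s
  run_once: true
  changed_when: false
```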
diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index 8a92fdd07..ed0e68ab6 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -113,10 +113,11 @@ owner: "{{ ansible_user }}" mode: "u=rw,g=,o=" -- name: Replace https://localhost:6443 by https://{{ apiserver_endpoint }}:6443 +# Use first_server here; Let the ha_cluster_method update to apiserver_endpoint +- name: Replace https://localhost:6443 by https://{{ first_server }}:6443 command: >- {{ bin_dir }}/kubectl config set-cluster default - --server=https://{{ apiserver_endpoint }}:6443 + --server=https://{{ first_server }}:6443 --kubeconfig ~{{ ansible_user }}/.kube/config changed_when: true diff --git a/roles/reset/ha/kube-vip/tasks/main.yml b/roles/reset/ha/kube-vip/tasks/main.yml index a67c918ae..eff1ea5b4 100644 --- a/roles/reset/ha/kube-vip/tasks/main.yml +++ b/roles/reset/ha/kube-vip/tasks/main.yml @@ -1,16 +1,53 @@ --- + +- name: Include vars + include_vars: "../roles/ha/kube-vip/defaults/main.yml" + +################################################# +# Remove kube-vip +# TODO: Learn how to do this with kubernetes.core + +- name: Remove kube-vip resource and files + vars: + path: "{{ data_dir }}/server/manifests/{{ ha_cluster_method }}" + block: + - name: Check for the manifest directory, {{ path }} + register: manifest_dir + stat: + path: "{{ path }}/." # Use /. just in case it is a symlink + + - name: Remove kube-vip resources found in {{ path }} + register: resources + shell: "{{ bin_dir }}/kubectl delete -f {{ path }}" + run_once: true + when: + - manifest_dir.stat.isdir is defined + - manifest_dir.stat.isdir + + - name: Remove manifest directory, {{ path }} + register: files + file: + path: "{{ path }}" + state: absent + +################################### +# Remove cluster VIP from interface +# kube-vip does not do this for you + +- name: Remove {{ ha_cluster_vip }}/{{ kube_vip_cidr }} from {{ kube_vip_interface }} + command: "ip address delete {{ ha_cluster_vip }}/{{ kube_vip_cidr }} dev {{ kube_vip_interface }}" + register: ipaddr_delete + when: ha_cluster_vip in (ansible_all_ipv4_addresses | list) + +############################################### # Recommended in https://kube-vip.io/usage/k3s/ # See "Clean Environment". + - name: Reset local interface command: "{{ item }}" with_items: - ip addr flush dev lo - ip addr add 127.0.0.1/8 dev lo failed_when: false - -# Remove cluster VIP from interface -- name: Remove {{ ha_cluster_vip }}/32 from {{ kube_vip_interface }} - command: "ip address delete {{ ha_cluster_vip }}/32 dev {{ kube_vip_interface }}" - register: ipaddr_delete - when: ha_cluster_vip in (ansible_all_ipv4_addresses | list) + changed_when: false diff --git a/roles/reset/tasks/cluster-method/external.yml b/roles/reset/tasks/cluster-method/external.yml deleted file mode 100644 index 69f228c0f..000000000 --- a/roles/reset/tasks/cluster-method/external.yml +++ /dev/null @@ -1,3 +0,0 @@ ---- -# Nothing (at the moment) to do for an external load-balancer. - From fd9dce40527728f0631a040bc97653155274e6e9 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 16:18:37 -0500 Subject: [PATCH 078/108] Fixed roles k3s/server and reset to use HA cluster method roles Signed-off-by: Jon S. 
Stumpf --- inventory/sample/group_vars/README.md | 3 +-- playbook/group_vars/all.yml | 1 + roles/k3s/server/tasks/main.yml | 22 +++++++++++------ roles/reset/tasks/main.yml | 34 +++++++++++++-------------- 4 files changed, 33 insertions(+), 27 deletions(-) diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md index 45db31254..f5ffe0fd3 100644 --- a/inventory/sample/group_vars/README.md +++ b/inventory/sample/group_vars/README.md @@ -31,8 +31,7 @@ It is possible to get an IP address dynamically but that is not implemented here The methods implemented today are: 1. `external` - requires a load-balancer external to the cluster 2. `kube-vip` - [https://kube-vip.io](https://kube-vip.io), arp-based daemonset using leader election - -Other load-balancing options are available (e.g., *keepalived*) but are not implemented here (yet). + 3. `keepalived` - all *k3s* servers are configured with [keepalived](https://www.redhat.com/sysadmin/keepalived-basics) to manage a VRRP instance ## Install Variables diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index 6809de7e5..0e7f6cfa6 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -33,4 +33,5 @@ k3s_service_file_extensions: k3s_cluster_methods: - external - kube-vip + - keepalived diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index ed0e68ab6..ab21fd765 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -8,10 +8,6 @@ include_tasks: k3s-init.yml when: ha_enabled -- name: Perform HA-specific tasks - include_tasks: "cluster-method/{{ ha_cluster_method }}.yml" - when: ha_enabled - ################################################################################ # Setup k3s service # @@ -113,11 +109,10 @@ owner: "{{ ansible_user }}" mode: "u=rw,g=,o=" -# Use first_server here; Let the ha_cluster_method update to apiserver_endpoint -- name: Replace https://localhost:6443 by https://{{ first_server }}:6443 +- name: Replace https://localhost:6443 by https://{{ apiserver_endpoint }}:6443 command: >- {{ bin_dir }}/kubectl config set-cluster default - --server=https://{{ first_server }}:6443 + --server=https://{{ apiserver_endpoint }}:6443 --kubeconfig ~{{ ansible_user }}/.kube/config changed_when: true @@ -128,3 +123,16 @@ dest: "{{ cluster_config }}" flat: yes run_once: true + +########################## +# Set up HA cluster method + +- name: Include role, {{ role }} + vars: + role: "ha/{{ ha_cluster_method }}" + include_role: + name: "{{ role }}" + when: + - ha_enabled + - ha_cluster_method != 'external' + diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 59ddc46e3..ce7207126 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -1,7 +1,17 @@ --- -# TODO: Should we use k3s-uninstall.sh from https://get/k3s.io instead so that -# we don't have to track the updates in the future? +########################## +# Remove HA cluster method + +- name: Include role, {{ role }} + vars: + role: "reset/ha/{{ ha_cluster_method }}" + include_role: + name: "{{ role }}" + when: + - ha_enabled + - ha_cluster_method != 'external' + - inventory_hostname in groups['k3s_server'] ########################### # Start of k3s-killall.sh # @@ -139,16 +149,6 @@ # # Should this be done rather than focusing on a discrete list of services (k3s_services)? 
-# -# Perform HA cluster-method-specific tasks -# -- name: Remove HA cluster method - include_tasks: "cluster-method/{{ ha_cluster_method }}.yml" - when: - - ha_enabled - - ha_cluster_method is defined - - inventory_hostname in groups['k3s_server'] - # # for cmd in kubectl crictl ctr; do # if [ -L {{ bin_dir }}/$cmd ]; then @@ -186,16 +186,14 @@ - "{{ data_dir }}" - /var/lib/kubelet - "{{ bin_dir }}/k3s.sh" - - "{{ bin_dir }}/k3s-killall.sh" - - "{{ bin_dir }}/k3s-uninstall.sh" + - "{{ bin_dir }}/k3s-killall.sh" # From https://get.k3s.io + - "{{ bin_dir }}/k3s-uninstall.sh" # From https://get.k3s.io -- name: Remove downloaded binaries +- name: Remove downloaded k3s binary file: - path: "{{ item }}" + path: "{{ bin_dir }}/k3s" state: absent when: remove_packages - loop: - - "{{ bin_dir }}/k3s" - name: Remove ~{{ ansible_user }}/.kube/config file: From 93184c96015ef2c2c798898651a7eac02099963b Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 16:46:31 -0500 Subject: [PATCH 079/108] Converted k3s-init.yml to a role, ha/etcd Signed-off-by: Jon S. Stumpf --- roles/ha/etcd/defaults/main.yml | 12 ++++++++++++ .../tasks/k3s-init.yml => ha/etcd/tasks/main.yml} | 0 roles/k3s/server/defaults/main.yml | 12 +----------- roles/k3s/server/tasks/main.yml | 5 +++-- 4 files changed, 16 insertions(+), 13 deletions(-) create mode 100644 roles/ha/etcd/defaults/main.yml rename roles/{k3s/server/tasks/k3s-init.yml => ha/etcd/tasks/main.yml} (100%) diff --git a/roles/ha/etcd/defaults/main.yml b/roles/ha/etcd/defaults/main.yml new file mode 100644 index 000000000..024b23d3d --- /dev/null +++ b/roles/ha/etcd/defaults/main.yml @@ -0,0 +1,12 @@ +--- + +server_init_args: >- + {% if ansible_host == first_server %} + --cluster-init + --tls-san {{ apiserver_endpoint }} + {% else %} + --server https://{{ first_server }}:6443 + {% endif %} + --token {{ k3s_token }} + {{ extra_server_args | default('') }} + diff --git a/roles/k3s/server/tasks/k3s-init.yml b/roles/ha/etcd/tasks/main.yml similarity index 100% rename from roles/k3s/server/tasks/k3s-init.yml rename to roles/ha/etcd/tasks/main.yml diff --git a/roles/k3s/server/defaults/main.yml b/roles/k3s/server/defaults/main.yml index 324918e81..03c29b68f 100644 --- a/roles/k3s/server/defaults/main.yml +++ b/roles/k3s/server/defaults/main.yml @@ -1,16 +1,6 @@ --- + ansible_user: debian cluster_config: cluster.conf ha_enabled: false -server_init_args: >- - {% if ha_enabled %} - {% if ansible_host == first_server %} - --cluster-init - --tls-san {{ apiserver_endpoint }} - {% else %} - --server https://{{ first_server }}:6443 - {% endif %} - --token {{ k3s_token }} - {% endif %} - {{ extra_server_args | default('') }} diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index ab21fd765..af04059e4 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -4,8 +4,9 @@ # Setup servers in cluster using k3s-init # -- name: Initialize k3s cluster - include_tasks: k3s-init.yml +- name: Initialize k3s HA embedded database, etcd + include_role: + name: "ha/etcd" when: ha_enabled ################################################################################ From 12b7b3f13752233a256ed8fef325eabdc0a0dc74 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 18:41:11 -0500 Subject: [PATCH 080/108] Fixed tasks that reported changes when there weren't Signed-off-by: Jon S. 
Stumpf --- roles/reset/tasks/main.yml | 26 +++++++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index ce7207126..1503d8825 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -61,6 +61,7 @@ - name: Show CNI namespaces register: ip_netns_show command: ip -j netns show master cni0 + changed_when: false # # BUG: Possible bug in ip-netns(8) on Raspbian. @@ -81,6 +82,7 @@ - name: Get list of network interface(s) that match 'master cni0' register: ip_link_show shell: ip -j link show master cni0 || echo "[ ]" + changed_when: false - name: Remove CNI interfaces command: ip link delete {{ item }} @@ -106,9 +108,27 @@ # iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore # TODO: Replace with appropriate ansible module # -- name: Remove KUBE and CNI chains from iptables - register: iptables_save_restore - shell: iptables-save | egrep -v '(KUBE|CNI)-' | iptables-restore + +- name: Dump iptables + register: iptables_saved + command: "iptables-save" + changed_when: false + +- name: Filter entries using regex pattern "{{ pattern }}" + vars: + pattern: '(KUBE|CNI)-' + set_fact: + iptables_filtered: >- + {{ + iptables_saved.stdout + | regex_replace('^.*'+pattern+'.*$', '#', multiline=true) + }} + +- name: Remove "(KUBE|CNI)-" iptables entries + command: + cmd: "iptables-restore" + stdin: "{{ iptables_filtered }}" + when: iptables_saved.stdout != iptables_filtered ######################### # End of k3s-killall.sh # From ef262fe10d4c3fdb25f2ecc18f5096ab5b97fe22 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 18:49:35 -0500 Subject: [PATCH 081/108] Only remove command files from groups['k3s_server'] Signed-off-by: Jon S. Stumpf --- roles/reset/tasks/main.yml | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 1503d8825..7519c277a 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -176,23 +176,26 @@ # fi # done # -- name: Command files - register: stat_command_files - stat: - path: "{{ bin_dir }}/{{ item }}" - loop: - - kubectl - - crictl - - ctr +- name: Remove command files if they are symlink'd + block: + - name: Stat command files + register: stat_command_files + stat: + path: "{{ bin_dir }}/{{ item }}" + loop: + - kubectl + - crictl + - ctr -- name: Remove command files - file: - path: "{{ item.stat.path }}" - state: absent - when: (item.stat.exists | default(false)) and (item.stat.islnk | default(false)) - loop: "{{ stat_command_files.results }}" - loop_control: - label: "{{ item.item }}" + - name: Remove command files + file: + path: "{{ item.stat.path }}" + state: absent + when: (item.stat.exists | default(false)) and (item.stat.islnk | default(false)) + loop: "{{ stat_command_files.results }}" + loop_control: + label: "{{ item.item }}" + when: inventory_hostname in groups['k3s_server'] # Remove files and directories - name: Remove files and data From 1f7b031540ad65944236e8d890e4f5d50d110a57 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 18:52:15 -0500 Subject: [PATCH 082/108] Removed unnecessary task that always threw an error Signed-off-by: Jon S. 
Stumpf --- roles/reset/ha/keepalived/tasks/main.yml | 9 --------- 1 file changed, 9 deletions(-) diff --git a/roles/reset/ha/keepalived/tasks/main.yml b/roles/reset/ha/keepalived/tasks/main.yml index 2f46f337e..bdd51f5ef 100644 --- a/roles/reset/ha/keepalived/tasks/main.yml +++ b/roles/reset/ha/keepalived/tasks/main.yml @@ -12,15 +12,6 @@ enabled: no state: stopped -##################################################### -# Make sure the cluster VIP is removed from interface -# Note: keepalived does this for us but just in case - -- name: Remove {{ ha_cluster_vip }}/{{ keepalived_network_mask }} from {{ keepalived_interface }} - command: "ip address delete {{ ha_cluster_vip }}/{{ keepalived_network_mask }} dev {{ keepalived_interface }}" - register: ipaddr_delete - when: ha_cluster_vip in (ansible_all_ipv4_addresses | list) - ################################## # Delete the configuration From 309e5d7312c60d24aa7fcc6ee73bff232f45b707 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 19:03:14 -0500 Subject: [PATCH 083/108] Added check if architecture isn't supported Signed-off-by: Jon S. Stumpf --- roles/download/tasks/main.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/roles/download/tasks/main.yml b/roles/download/tasks/main.yml index 766f1c2ae..c27ef7c75 100644 --- a/roles/download/tasks/main.yml +++ b/roles/download/tasks/main.yml @@ -25,6 +25,11 @@ - ansible_facts.architecture is search("arm") - ansible_facts.userspace_bits == "32" +- name: Skip if architecture ({{ ansible_facts.architecture }}) is supported + fail: + msg: "This host does not have a supported architecture ({{ ansible_facts.architecture }})." + when: k3s_arch is not defined + # Determine URLs for download - name: Determine Github URLs set_fact: From de0fd8c5d7b62927d55594b104569a72d3c3cdce Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 19:12:39 -0500 Subject: [PATCH 084/108] Fixed name: string to be consistent with others Signed-off-by: Jon S. Stumpf --- roles/raspberrypi/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/raspberrypi/tasks/main.yml b/roles/raspberrypi/tasks/main.yml index b80c91fec..78e13a621 100644 --- a/roles/raspberrypi/tasks/main.yml +++ b/roles/raspberrypi/tasks/main.yml @@ -39,7 +39,7 @@ when: - detected_distribution | default("") == "Raspbian" -- name: execute OS related tasks on the Raspberry Pi +- name: Execute OS related tasks on the Raspberry Pi include_tasks: "{{ item }}" with_first_found: - "prereq/{{ detected_distribution }}-{{ detected_distribution_major_version }}.yml" From c2cb97e3d11c0ee3f27f424ad461aea6e10b2da0 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 19:27:45 -0500 Subject: [PATCH 085/108] Removed ~{{ ansible_user }}/.kube directory, not just the config Signed-off-by: Jon S. Stumpf --- roles/reset/tasks/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index 7519c277a..c8c18249c 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -218,9 +218,9 @@ state: absent when: remove_packages -- name: Remove ~{{ ansible_user }}/.kube/config +- name: Remove ~{{ ansible_user }}/.kube directory file: - path: "~{{ ansible_user }}/.kube/config" + path: "~{{ ansible_user }}/.kube" state: absent when: inventory_hostname in groups['k3s_server'] From 4a8adcf5a5bf1f596c91f95b235b9a0478dba683 Mon Sep 17 00:00:00 2001 From: "Jon S. 
Stumpf" Date: Wed, 29 Dec 2021 19:44:52 -0500 Subject: [PATCH 086/108] Ensured remove_packages is treated as a boolean Signed-off-by: Jon S. Stumpf --- roles/ha/keepalived/defaults/main.yml | 1 + roles/reset/ha/keepalived/tasks/main.yml | 2 +- roles/reset/tasks/main.yml | 10 +++++----- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/roles/ha/keepalived/defaults/main.yml b/roles/ha/keepalived/defaults/main.yml index 83eebd7e0..03b2ec6db 100644 --- a/roles/ha/keepalived/defaults/main.yml +++ b/roles/ha/keepalived/defaults/main.yml @@ -21,6 +21,7 @@ keepalived_script_group: 'nogroup' keepalived_script_login: '/usr/sbin/nologin' keepalived_package_state: 'latest' +keepalived_package_remove: remove_packages | default(true) | bool keepalived_instance_name: 'K3S_VI_1' keepalived_interface: "{{ ansible_default_ipv4.interface }}" diff --git a/roles/reset/ha/keepalived/tasks/main.yml b/roles/reset/ha/keepalived/tasks/main.yml index bdd51f5ef..68375cbaa 100644 --- a/roles/reset/ha/keepalived/tasks/main.yml +++ b/roles/reset/ha/keepalived/tasks/main.yml @@ -41,5 +41,5 @@ package: name: keepalived state: absent - when: remove_packages | default(true) | bool + when: keepalived_package_remove | bool diff --git a/roles/reset/tasks/main.yml b/roles/reset/tasks/main.yml index c8c18249c..dbe8d1bfb 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/tasks/main.yml @@ -216,7 +216,7 @@ file: path: "{{ bin_dir }}/k3s" state: absent - when: remove_packages + when: remove_packages | bool - name: Remove ~{{ ansible_user }}/.kube directory file: @@ -230,7 +230,7 @@ state: remove when: - ansible_pkg_mgr == "yum" - - remove_packages + - remove_packages | bool - name: Remove package k3s-selinux zypper: @@ -238,19 +238,19 @@ state: remove when: - ansible_pkg_mgr == "zypper" - - remove_packages + - remove_packages | bool - name: Remove yum repo files shell: 'rm -f /etc/yum.repos.d/rancher-k3s-common*.repo' register: remove_repo_files when: - ansible_pkg_mgr == "yum" - - remove_packages + - remove_packages | bool - name: Remove zypper repo files shell: 'rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo' register: remove_repo_files when: - ansible_pkg_mgr == "zypper" - - remove_packages + - remove_packages | bool From db52cc93f60bccefecb787629f1194ab01ed624b Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 19:53:24 -0500 Subject: [PATCH 087/108] Remove unnecessary node-token tasks Signed-off-by: Jon S. Stumpf --- roles/k3s/server/tasks/main.yml | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index af04059e4..31d920356 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -37,16 +37,6 @@ wait_for: path: "{{ data_dir }}/server/node-token" -- name: Register node-token file access mode - stat: - path: "{{ data_dir }}/server/node-token" - register: p - -- name: Make node-token world-readable - file: - path: "{{ data_dir }}/server/node-token" - mode: "u=rw,g=r,o=r" - - name: Read node-token from the server slurp: path: "{{ data_dir }}/server/node-token" @@ -56,11 +46,6 @@ set_fact: token: "{{ node_token.content | b64decode | regex_replace('\n', '') }}" -- name: Restore node-token file access - file: - path: "{{ data_dir }}/server/node-token" - mode: "{{ p.stat.mode }}" - ################################################################################ # Create ctl commands # From 6790e39c4bcd2eb48b11faf46d51da578adb482a Mon Sep 17 00:00:00 2001 From: "Jon S. 
Stumpf" Date: Wed, 29 Dec 2021 20:17:32 -0500 Subject: [PATCH 088/108] Broke up reset role in reset/download and reset/k3s Signed-off-by: Jon S. Stumpf --- playbook/reset.yml | 4 +- roles/reset/{ => download}/defaults/main.yml | 0 roles/reset/download/tasks/main.yml | 42 +++++++++++++++++++ roles/reset/k3s/defaults/main.yml | 7 ++++ roles/reset/{ => k3s}/tasks/main.yml | 36 ---------------- .../{ => k3s}/tasks/umount_with_children.yml | 0 6 files changed, 52 insertions(+), 37 deletions(-) rename roles/reset/{ => download}/defaults/main.yml (100%) create mode 100644 roles/reset/download/tasks/main.yml create mode 100644 roles/reset/k3s/defaults/main.yml rename roles/reset/{ => k3s}/tasks/main.yml (87%) rename roles/reset/{ => k3s}/tasks/umount_with_children.yml (100%) diff --git a/playbook/reset.yml b/playbook/reset.yml index 77577fd4a..19616cc7b 100644 --- a/playbook/reset.yml +++ b/playbook/reset.yml @@ -4,4 +4,6 @@ gather_facts: yes become: yes roles: - - role: reset + - role: reset/k3s + - role: reset/download + diff --git a/roles/reset/defaults/main.yml b/roles/reset/download/defaults/main.yml similarity index 100% rename from roles/reset/defaults/main.yml rename to roles/reset/download/defaults/main.yml diff --git a/roles/reset/download/tasks/main.yml b/roles/reset/download/tasks/main.yml new file mode 100644 index 000000000..ac2e6e7b5 --- /dev/null +++ b/roles/reset/download/tasks/main.yml @@ -0,0 +1,42 @@ +--- + +- name: Remove downloaded k3s binary + file: + path: "{{ bin_dir }}/k3s" + state: absent + when: remove_packages | bool + +# +# TODO: Where is k3s-selinux getting downloaded? +# Are these tasks even necessary? +# +- name: Remove package k3s-selinux + yum: + name: k3s-selinux + state: remove + when: + - ansible_pkg_mgr == "yum" + - remove_packages | bool + +- name: Remove package k3s-selinux + zypper: + name: k3s-selinux + state: remove + when: + - ansible_pkg_mgr == "zypper" + - remove_packages | bool + +- name: Remove yum repo files + shell: 'rm -f /etc/yum.repos.d/rancher-k3s-common*.repo' + register: remove_repo_files + when: + - ansible_pkg_mgr == "yum" + - remove_packages | bool + +- name: Remove zypper repo files + shell: 'rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo' + register: remove_repo_files + when: + - ansible_pkg_mgr == "zypper" + - remove_packages | bool + diff --git a/roles/reset/k3s/defaults/main.yml b/roles/reset/k3s/defaults/main.yml new file mode 100644 index 000000000..8d3989bd4 --- /dev/null +++ b/roles/reset/k3s/defaults/main.yml @@ -0,0 +1,7 @@ +--- +ansible_user: debian +ha_enabled: false + +# Changing this flag will keep the downloaded binaries after a reset +remove_packages: true + diff --git a/roles/reset/tasks/main.yml b/roles/reset/k3s/tasks/main.yml similarity index 87% rename from roles/reset/tasks/main.yml rename to roles/reset/k3s/tasks/main.yml index dbe8d1bfb..f3cdeb951 100644 --- a/roles/reset/tasks/main.yml +++ b/roles/reset/k3s/tasks/main.yml @@ -212,45 +212,9 @@ - "{{ bin_dir }}/k3s-killall.sh" # From https://get.k3s.io - "{{ bin_dir }}/k3s-uninstall.sh" # From https://get.k3s.io -- name: Remove downloaded k3s binary - file: - path: "{{ bin_dir }}/k3s" - state: absent - when: remove_packages | bool - - name: Remove ~{{ ansible_user }}/.kube directory file: path: "~{{ ansible_user }}/.kube" state: absent when: inventory_hostname in groups['k3s_server'] -- name: Remove package k3s-selinux - yum: - name: k3s-selinux - state: remove - when: - - ansible_pkg_mgr == "yum" - - remove_packages | bool - -- name: Remove package k3s-selinux - 
zypper: - name: k3s-selinux - state: remove - when: - - ansible_pkg_mgr == "zypper" - - remove_packages | bool - -- name: Remove yum repo files - shell: 'rm -f /etc/yum.repos.d/rancher-k3s-common*.repo' - register: remove_repo_files - when: - - ansible_pkg_mgr == "yum" - - remove_packages | bool - -- name: Remove zypper repo files - shell: 'rm -f /etc/zypp/repos.d/rancher-k3s-common*.repo' - register: remove_repo_files - when: - - ansible_pkg_mgr == "zypper" - - remove_packages | bool - diff --git a/roles/reset/tasks/umount_with_children.yml b/roles/reset/k3s/tasks/umount_with_children.yml similarity index 100% rename from roles/reset/tasks/umount_with_children.yml rename to roles/reset/k3s/tasks/umount_with_children.yml From 38162f6f2fe9d8e0b9b98606814004cf8bec9708 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 20:29:07 -0500 Subject: [PATCH 089/108] Split out k3s-killall tasks into a separate task file Signed-off-by: Jon S. Stumpf --- roles/reset/k3s/tasks/k3s-killall.yml | 121 ++++++++++++++++++++++++ roles/reset/k3s/tasks/main.yml | 130 +++----------------------- 2 files changed, 132 insertions(+), 119 deletions(-) create mode 100644 roles/reset/k3s/tasks/k3s-killall.yml diff --git a/roles/reset/k3s/tasks/k3s-killall.yml b/roles/reset/k3s/tasks/k3s-killall.yml new file mode 100644 index 000000000..3aad8cac7 --- /dev/null +++ b/roles/reset/k3s/tasks/k3s-killall.yml @@ -0,0 +1,121 @@ +--- + +############################################################### +# The tasks below mimic k3s-killall.sh from https://get.k3s.io. +# The comments are taken from this script to make it easier to +# track changes and maintain parity. +# + +# +# for service in /etc/systemd/system/k3s*.service; do +# [ -s $service ] && systemctl stop $(basename $service) +# done +# +- name: Disable services + systemd: + name: "{{ item }}" + state: stopped + enabled: no + failed_when: false + loop: "{{ k3s_services }}" + +# +# killtree $({ set +x; } 2>/dev/null; getshims; set -x) +# +- name: pkill -9 -f "{{ data_dir }}/data/[^/]+/bin/containerd-shim" + register: pkill_containerd_shim + command: pkill -9 -f "{{ data_dir }}/data/[^/]+/bin/containerd-shim" + changed_when: "pkill_containerd_shim.rc == 0" + failed_when: false + +# +# +# do_unmount_and_remove [the list] +# +- name: Umount k3s filesystems and remove mount points + include_tasks: umount_with_children.yml + loop: + - /run/k3s + - "{{ data_dir }}" + - /var/lib/kubelet/pods + - /var/lib/kubelet/plugins + - /run/netns/cni- + loop_control: + loop_var: mounted_fs + +# +# Remove CNI namespaces +# ip netns show 2> /dev/null | grep cni- | xargs -r -t -n 1 ip netns delete +# +- name: Show CNI namespaces + register: ip_netns_show + command: ip -j netns show master cni0 + changed_when: false + +# +# BUG: Possible bug in ip-netns(8) on Raspbian. +# "ip -j netns show master cni0" does not always report "[ ]" but returns 0 when there is no master. +# +- name: Remove CNI namespaces + command: ip netns delete {{ item }} + loop: "{{ (ip_netns_show.stdout if ip_netns_show.stdout != '' else '[ ]') | from_json | json_query('[*].name') }}" + +# +# Remove CNI interfaces +# ip link show 2>/dev/null | grep 'master cni0' +# +# BUG: Possible bug in ip-link(8) on Raspbian. +# "ip -j link show master cni0" exits 255 when cni0 does not exist where +# "ip -j netns show master cni0" reports "[ ]", which is preferred. 
+# +- name: Get list of network interface(s) that match 'master cni0' + register: ip_link_show + shell: ip -j link show master cni0 || echo "[ ]" + changed_when: false + +- name: Remove CNI interfaces + command: ip link delete {{ item }} + loop: "{{ ip_link_show.stdout | from_json | json_query('[*].ifname') }}" + +# +# Remove other interfaces and files +# +- name: Remove other interfaces + command: ip link delete {{ item }} + when: item in ansible_interfaces + loop: + - cni0 + - flannel.1 + - flannel-v6.1 + +- name: Remove CNI files + file: + path: /var/lib/cni + state: absent + +# +# iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore +# TODO: Replace with appropriate ansible module +# + +- name: Dump iptables + register: iptables_saved + command: "iptables-save" + changed_when: false + +- name: Filter entries using regex pattern "{{ pattern }}" + vars: + pattern: '(KUBE|CNI)-' + set_fact: + iptables_filtered: >- + {{ + iptables_saved.stdout + | regex_replace('^.*'+pattern+'.*$', '#', multiline=true) + }} + +- name: Remove "(KUBE|CNI)-" iptables entries + command: + cmd: "iptables-restore" + stdin: "{{ iptables_filtered }}" + when: iptables_saved.stdout != iptables_filtered + diff --git a/roles/reset/k3s/tasks/main.yml b/roles/reset/k3s/tasks/main.yml index f3cdeb951..916145b50 100644 --- a/roles/reset/k3s/tasks/main.yml +++ b/roles/reset/k3s/tasks/main.yml @@ -1,7 +1,14 @@ --- -########################## +################################################################# +# The tasks below mimic k3s-uninstall.sh from https://get.k3s.io. +# The comments are taken from this script to make it easier to +# track changes and maintain parity. +# + +################################### # Remove HA cluster method +# Note: not from https://get.k3s.io - name: Include role, {{ role }} vars: @@ -13,126 +20,11 @@ - ha_cluster_method != 'external' - inventory_hostname in groups['k3s_server'] -########################### -# Start of k3s-killall.sh # -########################### - -# -# for service in /etc/systemd/system/k3s*.service; do -# [ -s $service ] && systemctl stop $(basename $service) -# done -# -- name: Disable services - systemd: - name: "{{ item }}" - state: stopped - enabled: no - failed_when: false - loop: "{{ k3s_services }}" - -# -# killtree $({ set +x; } 2>/dev/null; getshims; set -x) -# -- name: pkill -9 -f "{{ data_dir }}/data/[^/]+/bin/containerd-shim" - register: pkill_containerd_shim - command: pkill -9 -f "{{ data_dir }}/data/[^/]+/bin/containerd-shim" - changed_when: "pkill_containerd_shim.rc == 0" - failed_when: false - -# -# -# do_unmount_and_remove [the list] -# -- name: Umount k3s filesystems and remove mount points - include_tasks: umount_with_children.yml - loop: - - /run/k3s - - "{{ data_dir }}" - - /var/lib/kubelet/pods - - /var/lib/kubelet/plugins - - /run/netns/cni- - loop_control: - loop_var: mounted_fs - -# -# Remove CNI namespaces -# ip netns show 2> /dev/null | grep cni- | xargs -r -t -n 1 ip netns delete -# -- name: Show CNI namespaces - register: ip_netns_show - command: ip -j netns show master cni0 - changed_when: false - -# -# BUG: Possible bug in ip-netns(8) on Raspbian. -# "ip -j netns show master cni0" does not always report "[ ]" but returns 0 when there is no master. 
-# -- name: Remove CNI namespaces - command: ip netns delete {{ item }} - loop: "{{ (ip_netns_show.stdout if ip_netns_show.stdout != '' else '[ ]') | from_json | json_query('[*].name') }}" - -# -# Remove CNI interfaces -# ip link show 2>/dev/null | grep 'master cni0' -# -# BUG: Possible bug in ip-link(8) on Raspbian. -# "ip -j link show master cni0" exits 255 when cni0 does not exist where -# "ip -j netns show master cni0" reports "[ ]", which is preferred. -# -- name: Get list of network interface(s) that match 'master cni0' - register: ip_link_show - shell: ip -j link show master cni0 || echo "[ ]" - changed_when: false - -- name: Remove CNI interfaces - command: ip link delete {{ item }} - loop: "{{ ip_link_show.stdout | from_json | json_query('[*].ifname') }}" - # -# Remove other interfaces and files +# /usr/local/bin/k3s-killall.sh # -- name: Remove other interfaces - command: ip link delete {{ item }} - when: item in ansible_interfaces - loop: - - cni0 - - flannel.1 - - flannel-v6.1 - -- name: Remove CNI files - file: - path: /var/lib/cni - state: absent - -# -# iptables-save | grep -v KUBE- | grep -v CNI- | iptables-restore -# TODO: Replace with appropriate ansible module -# - -- name: Dump iptables - register: iptables_saved - command: "iptables-save" - changed_when: false - -- name: Filter entries using regex pattern "{{ pattern }}" - vars: - pattern: '(KUBE|CNI)-' - set_fact: - iptables_filtered: >- - {{ - iptables_saved.stdout - | regex_replace('^.*'+pattern+'.*$', '#', multiline=true) - }} - -- name: Remove "(KUBE|CNI)-" iptables entries - command: - cmd: "iptables-restore" - stdin: "{{ iptables_filtered }}" - when: iptables_saved.stdout != iptables_filtered - -######################### -# End of k3s-killall.sh # -######################### +- name: Shutdown k3s + include_tasks: k3s-killall.yml # # systemctl disable k3s From 69a39f34051bbbf3ae5ad61c0fbd6320b1ef0ff7 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Wed, 29 Dec 2021 20:31:21 -0500 Subject: [PATCH 090/108] Cosmetic changes to README.md Signed-off-by: Jon S. Stumpf --- README.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index e32e50f9e..8a33b3c19 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,8 @@ Author: ## Introduction to *k3s-ansible* The goal of *k3s-ansible* is to easily install a Kubernetes cluster on a variety of operating systems running on machines with different architectures. -The intention is to support what *k3s* supports. Here is what has been tested (:heavy_check_mark:) with *k3s-ansible*. +The intention is to support what *k3s* supports.\ +Here is what has been tested (:heavy_check_mark:) with *k3s-ansible*. | Operating System | amd64 | arm64 | armhf | | :--------------- | :---: | :---: | :---: | @@ -26,7 +27,7 @@ The intention is to support what *k3s* supports. Here is what has been tested ( cp -R inventory/sample inventory/my-cluster ``` -2. Edit `inventory/my-cluster/hosts.ini` to include the hosts that will make up your new cluster. +2. Edit `inventory/my-cluster/hosts.ini` to include the hosts that will make up your new cluster.\ For example: ```bash @@ -41,7 +42,7 @@ k3s_server k3s_agent ``` -3. Edit `inventory/my-cluster/group_vars/all.yml` to best match your environment. +3. Edit `inventory/my-cluster/group_vars/all.yml` to best match your environment.\ See, `inventory/sample/group_vars/README.md` for more details. 4. Provision your new cluster. From aa0d4d3049a0faaf4bcd74f43b4b476a4b30a958 Mon Sep 17 00:00:00 2001 From: "Jon S. 
Stumpf" Date: Wed, 29 Dec 2021 21:14:22 -0500 Subject: [PATCH 091/108] Moved update to cluster config to localhost Signed-off-by: Jon S. Stumpf --- playbook/site.yml | 8 ++++++++ roles/k3s/server/tasks/main.yml | 31 ++++++++++++++++--------------- 2 files changed, 24 insertions(+), 15 deletions(-) diff --git a/playbook/site.yml b/playbook/site.yml index 4bf0da1cc..a6011c335 100644 --- a/playbook/site.yml +++ b/playbook/site.yml @@ -23,6 +23,13 @@ connection: local gather_facts: no tasks: + - name: Replace https://localhost:6443 by https://{{ apiserver_endpoint }}:6443 + lineinfile: + path: "{{ cluster_config }}" + regexp: '^(.*)https://.*:6443(.*)$' + line: '\g<1>https://{{ apiserver_endpoint }}:6443\g<2>' + backrefs: yes + - name: Wait for control-plane at {{ apiserver_endpoint }}:6443 wait_for: host: "{{ apiserver_endpoint }}" @@ -35,3 +42,4 @@ become: yes roles: - role: k3s/agent + diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index 31d920356..0ec2c013d 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -80,32 +80,33 @@ # Setup {{ ansible_user }}/.kube/config # -- name: Create directory .kube +- name: Create local kube directory, {{ path }} + vars: + path: "~{{ ansible_user }}/.kube" file: - path: ~{{ ansible_user }}/.kube + path: "{{ path }}" state: directory owner: "{{ ansible_user }}" mode: "u=rwx,g=rx,o=" -- name: Copy config file to user home directory +- name: Copy kubeconfig file, {{ src }}, to {{ dest }} + vars: + src: "/etc/rancher/k3s/k3s.yaml" + dest: "~{{ ansible_user }}/.kube/config" copy: - src: /etc/rancher/k3s/k3s.yaml - dest: ~{{ ansible_user }}/.kube/config + src: "{{ src }}" + dest: "{{ dest }}" remote_src: yes owner: "{{ ansible_user }}" mode: "u=rw,g=,o=" -- name: Replace https://localhost:6443 by https://{{ apiserver_endpoint }}:6443 - command: >- - {{ bin_dir }}/kubectl config set-cluster default - --server=https://{{ apiserver_endpoint }}:6443 - --kubeconfig ~{{ ansible_user }}/.kube/config - changed_when: true - # Fetch a copy of the cluster config for use in one's ~/.kube/config. -- name: Copy .kube/config for new cluster +# The playbook will update the IP address. +- name: Fetch {{ src }} for use in one's ~/.kube/config + vars: + src: "/etc/rancher/k3s/k3s.yaml" fetch: - src: "~{{ ansible_user }}/.kube/config" + src: "{{ src }}" dest: "{{ cluster_config }}" flat: yes run_once: true @@ -113,7 +114,7 @@ ########################## # Set up HA cluster method -- name: Include role, {{ role }} +- name: Setup HA cluster method, {{ ha_cluster_method | default('n/a') }} vars: role: "ha/{{ ha_cluster_method }}" include_role: From 6d94a87ca55fa560c7e2e8180f11a49265f451dc Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 00:17:29 -0500 Subject: [PATCH 092/108] Made playbook/site.yml reentrant (for non-HA) and it only reports changes when appropriate Signed-off-by: Jon S. 
Stumpf --- playbook/site.yml | 31 ++++++++++------------------- roles/cluster-config/tasks/main.yml | 28 ++++++++++++++++++++++++++ roles/k3s/server/tasks/main.yml | 5 +++-- 3 files changed, 41 insertions(+), 23 deletions(-) create mode 100644 roles/cluster-config/tasks/main.yml diff --git a/playbook/site.yml b/playbook/site.yml index a6011c335..7cfee7abc 100644 --- a/playbook/site.yml +++ b/playbook/site.yml @@ -1,7 +1,7 @@ --- -# Get hosts ready for k3s installation -- hosts: k3s_cluster +- name: Prepare hosts for k3s installation + hosts: k3s_cluster gather_facts: yes become: yes roles: @@ -11,33 +11,22 @@ - role: download - role: raspberrypi -# Install the k3s servers -- hosts: k3s_server +- name: Install the k3s servers + hosts: k3s_server gather_facts: no become: yes roles: - role: k3s/server -# Wait for control-plane before setting up agents -- hosts: 127.0.0.1 +- name: Fetch cluster config and wait for control-plane before setting up agents + hosts: 127.0.0.1 connection: local gather_facts: no - tasks: - - name: Replace https://localhost:6443 by https://{{ apiserver_endpoint }}:6443 - lineinfile: - path: "{{ cluster_config }}" - regexp: '^(.*)https://.*:6443(.*)$' - line: '\g<1>https://{{ apiserver_endpoint }}:6443\g<2>' - backrefs: yes - - - name: Wait for control-plane at {{ apiserver_endpoint }}:6443 - wait_for: - host: "{{ apiserver_endpoint }}" - port: "6443" - timeout: 60 + roles: + - role: cluster-config -# Install the k3s agents -- hosts: k3s_agent +- name: Install the k3s agents + hosts: k3s_agent gather_facts: no become: yes roles: diff --git a/roles/cluster-config/tasks/main.yml b/roles/cluster-config/tasks/main.yml new file mode 100644 index 000000000..770e22b98 --- /dev/null +++ b/roles/cluster-config/tasks/main.yml @@ -0,0 +1,28 @@ +--- + +- name: Replace https://localhost:6443 with https://{{ apiserver_endpoint }}:6443 + lineinfile: + path: "{{ cluster_config }}.tmp" + regexp: '^(.*)https://.*:6443(.*)$' + line: '\g<1>https://{{ apiserver_endpoint }}:6443\g<2>' + backrefs: yes + changed_when: false + +# The transfer uses a temporary/intermediate file because a fetched file is always "changed". +- name: Update cluster config, {{ cluster_config }} + copy: + src: "{{ cluster_config }}.tmp" + dest: "{{ cluster_config }}" + +- name: Remove temporary cluster config + file: + path: "{{ cluster_config }}.tmp" + state: absent + changed_when: false + +- name: Wait for control-plane at {{ apiserver_endpoint }}:6443 + wait_for: + host: "{{ apiserver_endpoint }}" + port: "6443" + timeout: 60 + diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index 0ec2c013d..f0992c7ba 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -101,15 +101,16 @@ mode: "u=rw,g=,o=" # Fetch a copy of the cluster config for use in one's ~/.kube/config. -# The playbook will update the IP address. +# The playbook will update the IP address using the cluster-config role. - name: Fetch {{ src }} for use in one's ~/.kube/config vars: src: "/etc/rancher/k3s/k3s.yaml" fetch: src: "{{ src }}" - dest: "{{ cluster_config }}" + dest: "{{ cluster_config }}.tmp" flat: yes run_once: true + changed_when: false ########################## # Set up HA cluster method From 8ece2967b81471210a0fdd7d56aa88a2bf05109d Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 01:12:28 -0500 Subject: [PATCH 093/108] First draft of TODO.md Signed-off-by: Jon S. 
Stumpf --- TODO.md | 76 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 76 insertions(+) create mode 100644 TODO.md diff --git a/TODO.md b/TODO.md new file mode 100644 index 000000000..51f63bad3 --- /dev/null +++ b/TODO.md @@ -0,0 +1,76 @@ +# Update + +Author: [https://github.com/jon-stumpf](https://github.com/jon-stumpf) + +I came across *k3s-ansible* looking for an easy why to create a highly-available cluster +on my [Turing Pi 1](https://turingpi.com/v1/). +I saw *k3s-ansible* could easily configure my hosts but was missing the HA component. +Further research led me to the [k3s-ha](https://github.com/k3s-io/k3s-ansible/tree/k3s-ha) +branch but found that it still was incomplete for my needs. +In developing the needed additions, I discovered issues in the +[master](https://github.com/k3s-io/k3s-ansible/tree/master) branch and spent a month +reviewing the yaml files of *k3s-ansible* and the shell scripts from +[https://get.k3s.io](https://get.k3s.io). +In the end, I brought *k3s-ansible* to be at near parity with *https://get.k3s.io* and +I believe I have addressed some open issues in +[my pull requests](https://github.com/k3s-io/k3s-ansible/pulls/jon-stumpf). + +Once I completed my changes to the *master* branch, I got back to work on *k3s-ha*. +Building on the +[work of St0rmingBr4in](https://github.com/k3s-io/k3s-ansible/commits?author=St0rmingBr4in), +I implemented the HA embedded database using *etcd* and three cluster VIP methods: +1. **external**: uses an externally provided cluster VIP +2. **kube-vip**: uses [kube-vip](https://kube-vip.io/) with arp arbitration +3. **keepalived**: uses [keepalived](https://www.redhat.com/sysadmin/keepalived-basics) to implement VRRP + +I have reached out to +[itwars](https://github.com/itwars) and +[St0rmingBr4in](https://github.com/St0rmingBr4in) to get their feedback on this work and +to collaborate on closing the open issues and pull requests. +In the meantime, I would like others to provide feedback on my +[k3s-ha](https://github.com/jon-stumpf/k3s-ansible/tree/k3s-ha). +This is now stable and incorporates all my work on *k3s-ansible* except for a few commits. +Please, try it out. + +# TODO + +1. Make all roles *idempotent* and not report changes when none, in fact, are needed or material. +2. Add *keepalived*' label to servers when using keepalived; Add the following annotations: + - `keepalived/vrrp_instance=` + - `keepalived/master=[true|false]` + - `keepalived/version=` + - `keepalived/vip=/` +3. Add the ability to download the latest version of *kube-vip* + - Currently, uses a static version (v0.4.0) that can be manually changed +4. Make sure all roles have defaults defined +5. Make HA not require `k3s_token` to be defined + - i.e., use the `node-token` from the first server +6. Replace `command` and `shell` tasks with *ansible* equivalents (where appropriate) + - `ip` + - `kubectl` + - etc. +7. Is the `raspberrypi` role a NO-OP? + - It does not appear to execute any tasks that induce change + - Should it be deleted? +8. From where does *k3s-selinux* get installed? + - The `reset/download` role deletes it. 
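Item 5 above could take roughly the following shape. This is only a sketch, not code that exists in the repository: it assumes the token file sits at the k3s default location `{{ data_dir }}/server/node-token`, and the task names and the `first_server_node_token` variable are illustrative.

```yaml
# Sketch only: reuse the node-token generated by the first server as the
# join token, instead of requiring k3s_token/ha_k3s_token to be set.
# (Would have to run after the first server has started.)
- name: Read the node-token generated by the first server
  slurp:
    src: "{{ data_dir }}/server/node-token"
  delegate_to: "{{ groups['k3s_server'][0] }}"
  run_once: true
  register: first_server_node_token

- name: Use that node-token as the cluster join token
  set_fact:
    k3s_token: "{{ first_server_node_token.content | b64decode | trim }}"
```

Because the read uses `run_once`, the registered result is available to every host in the play, so the `set_fact` can run everywhere without contacting the first server again.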
+ +# Progress Report + +| Role | Role Type | Idempotent | Only Real Changes | TODOs | BUGs | +| :-------------------- | :--------: | :---: | :---: | :---: | :---: | +| cluster-config | install | :heavy_check_mark: | :heavy_check_mark: | | | +| config-check | install | :heavy_check_mark: | :heavy_check_mark: | | | +| prereq | install | :heavy_check_mark: | :heavy_check_mark: | | | +| download | install | :heavy_check_mark: | :heavy_check_mark: | | | +| raspberrypi | install | :heavy_check_mark: | :heavy_check_mark: | | | +| ha/etcd | HA-only | | unknown | | | +| ha/keepalived | HA-only | :heavy_check_mark: | :heavy_check_mark: | | | +| ha/kube-vip | HA-only | :heavy_check_mark: | :heavy_check_mark: | | | +| k3s/server | install | :heavy_check_mark: | :heavy_check_mark: | | | +| k3s/agent | install | :heavy_check_mark: | :heavy_check_mark: | | | +| reset/download | uninstall | :heavy_check_mark: | :heavy_check_mark: | | | +| reset/ha/keepalived | uninstall | :heavy_check_mark: | :heavy_check_mark: | | | +| reset/ha/kube-vip | uninstall | :heavy_check_mark: | :heavy_check_mark: | | | +| reset/k3s | uninstall | :heavy_check_mark: | :heavy_check_mark: | | | + From d9ed773a0c5821824407489af58e6952d56f5f67 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 01:38:48 -0500 Subject: [PATCH 094/108] Updated TODO and added comments to roles where appropriate Signed-off-by: Jon S. Stumpf --- TODO.md | 64 +++++++++++++++--------------- roles/ha/etcd/defaults/main.yml | 2 + roles/ha/keepalived/tasks/main.yml | 3 ++ roles/ha/kube-vip/tasks/main.yml | 4 ++ roles/raspberrypi/tasks/main.yml | 3 ++ 5 files changed, 44 insertions(+), 32 deletions(-) diff --git a/TODO.md b/TODO.md index 51f63bad3..5c3b8893d 100644 --- a/TODO.md +++ b/TODO.md @@ -11,8 +11,8 @@ In developing the needed additions, I discovered issues in the [master](https://github.com/k3s-io/k3s-ansible/tree/master) branch and spent a month reviewing the yaml files of *k3s-ansible* and the shell scripts from [https://get.k3s.io](https://get.k3s.io). -In the end, I brought *k3s-ansible* to be at near parity with *https://get.k3s.io* and -I believe I have addressed some open issues in +In the end, I brought *k3s-ansible* to be at near parity with https://get.k3s.io and +I believe I have addressed some open issues as referenced in [my pull requests](https://github.com/k3s-io/k3s-ansible/pulls/jon-stumpf). Once I completed my changes to the *master* branch, I got back to work on *k3s-ha*. @@ -28,49 +28,49 @@ I have reached out to [St0rmingBr4in](https://github.com/St0rmingBr4in) to get their feedback on this work and to collaborate on closing the open issues and pull requests. In the meantime, I would like others to provide feedback on my -[k3s-ha](https://github.com/jon-stumpf/k3s-ansible/tree/k3s-ha). -This is now stable and incorporates all my work on *k3s-ansible* except for a few commits. +[k3s-ha](https://github.com/jon-stumpf/k3s-ansible/tree/k3s-ha) branch. +This is now stable and incorporates all my previous work on *k3s-ansible* except for a few commits. Please, try it out. # TODO 1. Make all roles *idempotent* and not report changes when none, in fact, are needed or material. 2. Add *keepalived*' label to servers when using keepalived; Add the following annotations: - - `keepalived/vrrp_instance=` - - `keepalived/master=[true|false]` - - `keepalived/version=` - - `keepalived/vip=/` + - `keepalived/vrrp_instance=` + - `keepalived/master=[true|false]` + - `keepalived/version=` + - `keepalived/vip=/` 3. 
Add the ability to download the latest version of *kube-vip* - - Currently, uses a static version (v0.4.0) that can be manually changed + - Currently, uses a static version (v0.4.0) that can be manually changed 4. Make sure all roles have defaults defined 5. Make HA not require `k3s_token` to be defined - - i.e., use the `node-token` from the first server + - i.e., use the `node-token` from the first server 6. Replace `command` and `shell` tasks with *ansible* equivalents (where appropriate) - - `ip` - - `kubectl` - - etc. + - `ip` + - `kubectl` + - etc. 7. Is the `raspberrypi` role a NO-OP? - - It does not appear to execute any tasks that induce change - - Should it be deleted? + - It does not appear to execute any tasks that induce change + - Should it be deleted? 8. From where does *k3s-selinux* get installed? - - The `reset/download` role deletes it. + - The `reset/download` role deletes it. # Progress Report -| Role | Role Type | Idempotent | Only Real Changes | TODOs | BUGs | -| :-------------------- | :--------: | :---: | :---: | :---: | :---: | -| cluster-config | install | :heavy_check_mark: | :heavy_check_mark: | | | -| config-check | install | :heavy_check_mark: | :heavy_check_mark: | | | -| prereq | install | :heavy_check_mark: | :heavy_check_mark: | | | -| download | install | :heavy_check_mark: | :heavy_check_mark: | | | -| raspberrypi | install | :heavy_check_mark: | :heavy_check_mark: | | | -| ha/etcd | HA-only | | unknown | | | -| ha/keepalived | HA-only | :heavy_check_mark: | :heavy_check_mark: | | | -| ha/kube-vip | HA-only | :heavy_check_mark: | :heavy_check_mark: | | | -| k3s/server | install | :heavy_check_mark: | :heavy_check_mark: | | | -| k3s/agent | install | :heavy_check_mark: | :heavy_check_mark: | | | -| reset/download | uninstall | :heavy_check_mark: | :heavy_check_mark: | | | -| reset/ha/keepalived | uninstall | :heavy_check_mark: | :heavy_check_mark: | | | -| reset/ha/kube-vip | uninstall | :heavy_check_mark: | :heavy_check_mark: | | | -| reset/k3s | uninstall | :heavy_check_mark: | :heavy_check_mark: | | | +| Role | Role Type | Idempotent | Only Real Changes | Defaults | commands | TODOs | BUGs | +| :-------------------- | :--------: | :---: | :---: | :---: | :---: | :---: | :---: | +| cluster-config | install | :heavy_check_mark: | :heavy_check_mark: | | - | - | - | +| config-check | install | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | - | - | - | +| prereq | install | :heavy_check_mark: | :heavy_check_mark: | **n/a** | - | - | - | +| download | install | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | - | - | - | +| raspberrypi | install | :heavy_check_mark: | :heavy_check_mark: | **n/a** | 2 | 1 | - | +| ha/etcd | HA-only | | unknown | :heavy_check_mark: | 3 | 1 | - | +| ha/keepalived | HA-only | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | 1 | 1 | - | +| ha/kube-vip | HA-only | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | 2 | 3 | - | +| k3s/server | install | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | - | - | - | +| k3s/agent | install | :heavy_check_mark: | :heavy_check_mark: | | - | - | - | +| reset/download | uninstall | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | 2 | 1 | - | +| reset/ha/keepalived | uninstall | :heavy_check_mark: | :heavy_check_mark: | *by ref* | - | - | - | +| reset/ha/kube-vip | uninstall | :heavy_check_mark: | :heavy_check_mark: | *by ref* | 3 | 1 | - | +| reset/k3s | uninstall | :heavy_check_mark: | :heavy_check_mark: | 
:heavy_check_mark: | 9 | 1 | 2 | diff --git a/roles/ha/etcd/defaults/main.yml b/roles/ha/etcd/defaults/main.yml index 024b23d3d..a2d56b04b 100644 --- a/roles/ha/etcd/defaults/main.yml +++ b/roles/ha/etcd/defaults/main.yml @@ -1,5 +1,7 @@ --- +# TODO: How do we use the first_server node-token instead of k3s_token? + server_init_args: >- {% if ansible_host == first_server %} --cluster-init diff --git a/roles/ha/keepalived/tasks/main.yml b/roles/ha/keepalived/tasks/main.yml index ff70ad3c7..9f4bc68c2 100644 --- a/roles/ha/keepalived/tasks/main.yml +++ b/roles/ha/keepalived/tasks/main.yml @@ -134,3 +134,6 @@ enabled: yes state: started +############################################## +# TODO: need tasks to label and annotate nodes + diff --git a/roles/ha/kube-vip/tasks/main.yml b/roles/ha/kube-vip/tasks/main.yml index e51fdf4d6..19cbe0e34 100644 --- a/roles/ha/kube-vip/tasks/main.yml +++ b/roles/ha/kube-vip/tasks/main.yml @@ -1,5 +1,9 @@ --- +################################################### +# TODO: Implement way to determine 'latest' version +# to download. + ############################################### # Recommended in https://kube-vip.io/usage/k3s/ # See "Clean Environment". diff --git a/roles/raspberrypi/tasks/main.yml b/roles/raspberrypi/tasks/main.yml index 78e13a621..edb992ec4 100644 --- a/roles/raspberrypi/tasks/main.yml +++ b/roles/raspberrypi/tasks/main.yml @@ -1,4 +1,7 @@ --- + +# TODO: Is this role even necessary? + - name: Test for raspberry pi /proc/cpuinfo command: grep -E "Raspberry Pi|BCM2708|BCM2709|BCM2835|BCM2836" /proc/cpuinfo register: grep_cpuinfo_raspberrypi From 7cb174dd2f4eb3a3f52198295cd9c4eaa3f4d02f Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 02:22:14 -0500 Subject: [PATCH 095/108] Fixed typo in inventory/sample/group_vars/README.md Signed-off-by: Jon S. Stumpf --- inventory/sample/group_vars/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md index f5ffe0fd3..4e4cbd82c 100644 --- a/inventory/sample/group_vars/README.md +++ b/inventory/sample/group_vars/README.md @@ -1,5 +1,5 @@ -## Introduction +# Introduction `inventory/x/group_vars/all.yml` is meant to be modified appropriately for your environment. From 107e74ddd6f155c52decc004cb28558b07de3b82 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 02:31:02 -0500 Subject: [PATCH 096/108] Added Turing Pi HA example. Signed-off-by: Jon S. Stumpf --- README.md | 3 ++- inventory/.gitignore | 2 ++ inventory/turingpi/group_vars/README.md | 1 + inventory/turingpi/group_vars/all.yml | 19 +++++++++++++++++++ inventory/turingpi/hosts.ini | 14 ++++++++++++++ 5 files changed, 38 insertions(+), 1 deletion(-) create mode 120000 inventory/turingpi/group_vars/README.md create mode 100644 inventory/turingpi/group_vars/all.yml create mode 100644 inventory/turingpi/hosts.ini diff --git a/README.md b/README.md index 8a33b3c19..28ae9cfd6 100644 --- a/README.md +++ b/README.md @@ -67,5 +67,6 @@ See the [HA-embedded documentation](https://rancher.com/docs/k3s/latest/en/insta HA expects that there is a virtual IP (**ha_cluster_vip**) in front of the *control-plane* servers. A few methods have been implemented to provide and manage this VIP. -See `inventory/sample/group_vars/README.md` for more details. +See `inventory/turingpi` for my example HA setup on my Turing Pi v1. +See `inventory/sample/group_vars/README.md` for more details on variables. 
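For anyone trying the Turing Pi example below, a quick way to confirm the VIP is actually serving the API after `site.yml` finishes is a throwaway local play along these lines. It is not part of the repository: it reuses the same `wait_for` check the `cluster-config` role performs and assumes `kubectl` is installed on the controller and that the fetched kubeconfig sits at the default `cluster.conf` path.

```yaml
# Hypothetical smoke test, run on the Ansible controller after site.yml.
- hosts: 127.0.0.1
  connection: local
  gather_facts: no
  vars:
    ha_cluster_vip: 192.168.140.127   # substitute your own cluster VIP
    cluster_config: cluster.conf      # where site.yml fetched the kubeconfig
  tasks:
    - name: Wait for the control-plane VIP to answer
      wait_for:
        host: "{{ ha_cluster_vip }}"
        port: 6443
        timeout: 60

    - name: List the nodes through the fetched cluster config
      command: kubectl --kubeconfig {{ cluster_config }} get nodes
      changed_when: false
```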
diff --git a/inventory/.gitignore b/inventory/.gitignore index 9a1ab6b19..71d669fd5 100644 --- a/inventory/.gitignore +++ b/inventory/.gitignore @@ -2,3 +2,5 @@ !.gitignore !sample/ !sample/** +!turingpi/ +!turingpi/** diff --git a/inventory/turingpi/group_vars/README.md b/inventory/turingpi/group_vars/README.md new file mode 120000 index 000000000..7482f336f --- /dev/null +++ b/inventory/turingpi/group_vars/README.md @@ -0,0 +1 @@ +../../sample/group_vars/README.md \ No newline at end of file diff --git a/inventory/turingpi/group_vars/all.yml b/inventory/turingpi/group_vars/all.yml new file mode 100644 index 000000000..29519377e --- /dev/null +++ b/inventory/turingpi/group_vars/all.yml @@ -0,0 +1,19 @@ +--- + +# See inventory/sample/group_vars/README.md for more options. +# If this file is empty, default values will be used for all mandatory fields. + +# The user that has password-less ssh access to configure your hosts +ansible_user: pirate + +# The location of where to capture the kube config of the new cluster +# Relative paths are relative to the playbook directory. +cluster_config: cluster.conf + +ha_enabled: true +ha_cluster_vip: 192.168.140.127 +ha_cluster_method: keepalived + +# Use the latest k3s version instead of 'stable' +install_k3s_channel: 'latest' + diff --git a/inventory/turingpi/hosts.ini b/inventory/turingpi/hosts.ini new file mode 100644 index 000000000..bf353fb92 --- /dev/null +++ b/inventory/turingpi/hosts.ini @@ -0,0 +1,14 @@ +[k3s_server] +192.168.140.120 +192.168.140.123 +192.168.140.126 + +[k3s_agent] +192.168.140.121 +192.168.140.122 +192.168.140.124 +192.168.140.125 + +[k3s_cluster:children] +k3s_server +k3s_agent From a76c8d97173f2e4f9b252a660b4cdf2b24760dd5 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 02:39:23 -0500 Subject: [PATCH 097/108] Removed stray whitespace Signed-off-by: Jon S. Stumpf --- roles/reset/k3s/tasks/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/reset/k3s/tasks/main.yml b/roles/reset/k3s/tasks/main.yml index 916145b50..88ed7d3c2 100644 --- a/roles/reset/k3s/tasks/main.yml +++ b/roles/reset/k3s/tasks/main.yml @@ -85,7 +85,7 @@ state: absent when: (item.stat.exists | default(false)) and (item.stat.islnk | default(false)) loop: "{{ stat_command_files.results }}" - loop_control: + loop_control: label: "{{ item.item }}" when: inventory_hostname in groups['k3s_server'] From 094176cd376d31712a12bcc7e640ee2e3fce51b7 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 03:11:56 -0500 Subject: [PATCH 098/108] Added ha_k3s_token to inventory/sample/group_vars/all.yml Signed-off-by: Jon S. Stumpf --- README.md | 4 ++++ TODO.md | 2 ++ inventory/sample/group_vars/README.md | 8 +++++--- playbook/group_vars/all.yml | 2 +- 4 files changed, 12 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 28ae9cfd6..123170170 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,10 @@ Author: ## Introduction to *k3s-ansible* The goal of *k3s-ansible* is to easily install a Kubernetes cluster on a variety of operating systems running on machines with different architectures. +In general, users of *k3s-ansible* should only need to edit two files: +- inventory/sample/group_vars/all.yml +- inventory/sample/hosts.ini + The intention is to support what *k3s* supports.\ Here is what has been tested (:heavy_check_mark:) with *k3s-ansible*. diff --git a/TODO.md b/TODO.md index 5c3b8893d..f88aea5e9 100644 --- a/TODO.md +++ b/TODO.md @@ -54,6 +54,8 @@ Please, try it out. 
- Should it be deleted? 8. From where does *k3s-selinux* get installed? - The `reset/download` role deletes it. +9. Document lesser switches to control behavior of roles (e.g., remove_packages) +10. Create playbooks for other common operations beyond install/uninstall # Progress Report diff --git a/inventory/sample/group_vars/README.md b/inventory/sample/group_vars/README.md index 4e4cbd82c..d32a33120 100644 --- a/inventory/sample/group_vars/README.md +++ b/inventory/sample/group_vars/README.md @@ -29,9 +29,11 @@ It is possible to get an IP address dynamically but that is not implemented here - **ha_cluster_method**: specifies the method of clustering to use for the virtual IP. The methods implemented today are: - 1. `external` - requires a load-balancer external to the cluster - 2. `kube-vip` - [https://kube-vip.io](https://kube-vip.io), arp-based daemonset using leader election - 3. `keepalived` - all *k3s* servers are configured with [keepalived](https://www.redhat.com/sysadmin/keepalived-basics) to manage a VRRP instance + 1. `external` - requires a load-balancer external to the cluster + 2. `kube-vip` - [https://kube-vip.io](https://kube-vip.io), arp-based daemonset using leader election + 3. `keepalived` - all *k3s* servers are configured with [keepalived](https://www.redhat.com/sysadmin/keepalived-basics) to manage a VRRP instance + +- **ha_k3s_token**: specifies k3s token used by hosts to join the cluster ## Install Variables diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index 0e7f6cfa6..758c7947d 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -18,7 +18,7 @@ data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" first_server: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" apiserver_endpoint: "{{ (ha_cluster_vip | mandatory) if (ha_enabled | default(false)) else first_server }}" -k3s_token: "MySuperSecureToken" +k3s_token: "{{ ha_k3s_token | default('MySuperSecureToken') }}" # Services information k3s_services: From 1f04cff367a3b003dc0780b8065da435eadfd161 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 03:42:11 -0500 Subject: [PATCH 099/108] Fixed typo in README.md Signed-off-by: Jon S. Stumpf --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 123170170..338adb08e 100644 --- a/README.md +++ b/README.md @@ -6,8 +6,8 @@ Author: The goal of *k3s-ansible* is to easily install a Kubernetes cluster on a variety of operating systems running on machines with different architectures. In general, users of *k3s-ansible* should only need to edit two files: -- inventory/sample/group_vars/all.yml -- inventory/sample/hosts.ini +- `inventory/sample/group_vars/all.yml` +- `inventory/sample/hosts.ini` The intention is to support what *k3s* supports.\ Here is what has been tested (:heavy_check_mark:) with *k3s-ansible*. From 765f6a54b5efdf5d67f33f7aa8b8ca494192f2d7 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 12:11:09 -0500 Subject: [PATCH 100/108] Made cluster_config and ha_cluster_method optional, with defaults Signed-off-by: Jon S. 
Stumpf --- inventory/turingpi/group_vars/all.yml | 10 ---------- inventory/turingpi/hosts.ini | 6 +++--- playbook/group_vars/all.yml | 3 +++ roles/cluster-config/tasks/main.yml | 10 +++++----- roles/config-check/tasks/main.yml | 11 ++--------- roles/ha/kube-vip/tasks/main.yml | 2 +- roles/k3s/server/defaults/main.yml | 3 ++- roles/k3s/server/tasks/main.yml | 8 ++++---- roles/reset/ha/kube-vip/tasks/main.yml | 2 +- roles/reset/k3s/tasks/main.yml | 4 ++-- 10 files changed, 23 insertions(+), 36 deletions(-) diff --git a/inventory/turingpi/group_vars/all.yml b/inventory/turingpi/group_vars/all.yml index 29519377e..57be56cf6 100644 --- a/inventory/turingpi/group_vars/all.yml +++ b/inventory/turingpi/group_vars/all.yml @@ -1,19 +1,9 @@ --- -# See inventory/sample/group_vars/README.md for more options. -# If this file is empty, default values will be used for all mandatory fields. - # The user that has password-less ssh access to configure your hosts ansible_user: pirate -# The location of where to capture the kube config of the new cluster -# Relative paths are relative to the playbook directory. -cluster_config: cluster.conf - ha_enabled: true ha_cluster_vip: 192.168.140.127 -ha_cluster_method: keepalived -# Use the latest k3s version instead of 'stable' -install_k3s_channel: 'latest' diff --git a/inventory/turingpi/hosts.ini b/inventory/turingpi/hosts.ini index bf353fb92..89acfe6c6 100644 --- a/inventory/turingpi/hosts.ini +++ b/inventory/turingpi/hosts.ini @@ -5,9 +5,9 @@ [k3s_agent] 192.168.140.121 -192.168.140.122 -192.168.140.124 -192.168.140.125 +#192.168.140.122 +#192.168.140.124 +#192.168.140.125 [k3s_cluster:children] k3s_server diff --git a/playbook/group_vars/all.yml b/playbook/group_vars/all.yml index 758c7947d..70721c740 100644 --- a/playbook/group_vars/all.yml +++ b/playbook/group_vars/all.yml @@ -18,6 +18,9 @@ data_dir: "{{ install_k3s_data_dir | default('/var/lib/rancher/k3s') }}" first_server: "{{ hostvars[groups['k3s_server'][0]]['ansible_host'] | default(groups['k3s_server'][0]) }}" apiserver_endpoint: "{{ (ha_cluster_vip | mandatory) if (ha_enabled | default(false)) else first_server }}" +k3s_cluster_config: "{{ cluster_config | default('cluster.conf') }}" +k3s_cluster_method: "{{ ha_cluster_method | default('keepalived') }}" + k3s_token: "{{ ha_k3s_token | default('MySuperSecureToken') }}" # Services information diff --git a/roles/cluster-config/tasks/main.yml b/roles/cluster-config/tasks/main.yml index 770e22b98..6524a720b 100644 --- a/roles/cluster-config/tasks/main.yml +++ b/roles/cluster-config/tasks/main.yml @@ -2,21 +2,21 @@ - name: Replace https://localhost:6443 with https://{{ apiserver_endpoint }}:6443 lineinfile: - path: "{{ cluster_config }}.tmp" + path: "{{ k3s_cluster_config }}.tmp" regexp: '^(.*)https://.*:6443(.*)$' line: '\g<1>https://{{ apiserver_endpoint }}:6443\g<2>' backrefs: yes changed_when: false # The transfer uses a temporary/intermediate file because a fetched file is always "changed". 
-- name: Update cluster config, {{ cluster_config }} +- name: Update cluster config, {{ k3s_cluster_config }} copy: - src: "{{ cluster_config }}.tmp" - dest: "{{ cluster_config }}" + src: "{{ k3s_cluster_config }}.tmp" + dest: "{{ k3s_cluster_config }}" - name: Remove temporary cluster config file: - path: "{{ cluster_config }}.tmp" + path: "{{ k3s_cluster_config }}.tmp" state: absent changed_when: false diff --git a/roles/config-check/tasks/main.yml b/roles/config-check/tasks/main.yml index 672f2f039..31e557502 100644 --- a/roles/config-check/tasks/main.yml +++ b/roles/config-check/tasks/main.yml @@ -28,19 +28,12 @@ - ha_enabled - (ha_cluster_vip is not defined) or not (ha_cluster_vip | ansible.netcommon.ipaddr) -- name: Check that ha_cluster_method is defined - fail: - msg: "When HA is enabled, 'ha_cluster_method' must be defined." - when: - - ha_enabled - - ha_cluster_method is not defined - - name: Check for a proper HA cluster method fail: - msg: "'{{ ha_cluster_method }}' is not a supported HA cluster method." + msg: "'{{ k3s_cluster_method }}' is not a supported HA cluster method." when: - ha_enabled - - ha_cluster_method not in k3s_cluster_methods + - k3s_cluster_method not in k3s_cluster_methods - name: Determine version to download block: diff --git a/roles/ha/kube-vip/tasks/main.yml b/roles/ha/kube-vip/tasks/main.yml index 19cbe0e34..3cfaa1d04 100644 --- a/roles/ha/kube-vip/tasks/main.yml +++ b/roles/ha/kube-vip/tasks/main.yml @@ -22,7 +22,7 @@ - name: Create manifest directory {{ path }} vars: - path: "{{ data_dir }}/server/manifests/{{ ha_cluster_method }}" + path: "{{ data_dir }}/server/manifests/{{ k3s_cluster_method }}" file: path: "{{ path }}" state: directory diff --git a/roles/k3s/server/defaults/main.yml b/roles/k3s/server/defaults/main.yml index 03c29b68f..ad6589e7f 100644 --- a/roles/k3s/server/defaults/main.yml +++ b/roles/k3s/server/defaults/main.yml @@ -1,6 +1,7 @@ --- ansible_user: debian -cluster_config: cluster.conf ha_enabled: false +k3s_cluster_config: cluster.conf + diff --git a/roles/k3s/server/tasks/main.yml b/roles/k3s/server/tasks/main.yml index f0992c7ba..b1112aa54 100644 --- a/roles/k3s/server/tasks/main.yml +++ b/roles/k3s/server/tasks/main.yml @@ -107,7 +107,7 @@ src: "/etc/rancher/k3s/k3s.yaml" fetch: src: "{{ src }}" - dest: "{{ cluster_config }}.tmp" + dest: "{{ k3s_cluster_config }}.tmp" flat: yes run_once: true changed_when: false @@ -115,12 +115,12 @@ ########################## # Set up HA cluster method -- name: Setup HA cluster method, {{ ha_cluster_method | default('n/a') }} +- name: Setup HA cluster method, {{ k3s_cluster_method | default('n/a') }} vars: - role: "ha/{{ ha_cluster_method }}" + role: "ha/{{ k3s_cluster_method }}" include_role: name: "{{ role }}" when: - ha_enabled - - ha_cluster_method != 'external' + - k3s_cluster_method != 'external' diff --git a/roles/reset/ha/kube-vip/tasks/main.yml b/roles/reset/ha/kube-vip/tasks/main.yml index eff1ea5b4..2cc098d29 100644 --- a/roles/reset/ha/kube-vip/tasks/main.yml +++ b/roles/reset/ha/kube-vip/tasks/main.yml @@ -9,7 +9,7 @@ - name: Remove kube-vip resource and files vars: - path: "{{ data_dir }}/server/manifests/{{ ha_cluster_method }}" + path: "{{ data_dir }}/server/manifests/{{ k3s_cluster_method }}" block: - name: Check for the manifest directory, {{ path }} register: manifest_dir diff --git a/roles/reset/k3s/tasks/main.yml b/roles/reset/k3s/tasks/main.yml index 88ed7d3c2..b17b7390d 100644 --- a/roles/reset/k3s/tasks/main.yml +++ b/roles/reset/k3s/tasks/main.yml @@ -12,12 +12,12 @@ - 
name: Include role, {{ role }} vars: - role: "reset/ha/{{ ha_cluster_method }}" + role: "reset/ha/{{ k3s_cluster_method }}" include_role: name: "{{ role }}" when: - ha_enabled - - ha_cluster_method != 'external' + - k3s_cluster_method != 'external' - inventory_hostname in groups['k3s_server'] # From 7ffd298847449e30a129cfab8437a5a710faf70b Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 12:12:52 -0500 Subject: [PATCH 101/108] Added more content to README.md Signed-off-by: Jon S. Stumpf --- README.md | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 338adb08e..f77f03d36 100644 --- a/README.md +++ b/README.md @@ -4,12 +4,24 @@ Author: ## Introduction to *k3s-ansible* -The goal of *k3s-ansible* is to easily install a Kubernetes cluster on a variety of operating systems running on machines with different architectures. +The goal of *k3s-ansible* is to easily install a Kubernetes cluster on a variety of +operating systems running on machines with different architectures. In general, users of *k3s-ansible* should only need to edit two files: - `inventory/sample/group_vars/all.yml` - `inventory/sample/hosts.ini` -The intention is to support what *k3s* supports.\ +All you need to get started is a list of IP addresses for the hosts that you want to +participate in the cluster and a username that has password-less *ssh* access to all +those hosts. That's it! +No need to futz with lots of settings and variables (unless you like that sort of thing; +then, have at it). + +And, to setup an HA cluster, you need one more IP address - not of a host, +but for your cluster virtual IP address. +You don't need to know how to setup a clustering solution since *k3s-ansible* does it for you. +But, for HA, you just need at least three hosts. + +The intention is for *k3s-ansible* to support what *k3s* supports.\ Here is what has been tested (:heavy_check_mark:) with *k3s-ansible*. | Operating System | amd64 | arm64 | armhf | @@ -22,6 +34,7 @@ Here is what has been tested (:heavy_check_mark:) with *k3s-ansible*. - The deployment environment must have *ansible* v2.4.0+. - Hosts in the cluster must have password-less *ssh* access. +- HA requires at least three hosts. ## Usage From 54c6f8bc95c16dc6d870a51ad3b977436aa926ca Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 12:15:30 -0500 Subject: [PATCH 102/108] Updated progress report in TODO.md Signed-off-by: Jon S. Stumpf --- TODO.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/TODO.md b/TODO.md index f88aea5e9..1810c9196 100644 --- a/TODO.md +++ b/TODO.md @@ -62,17 +62,17 @@ Please, try it out. 
| Role | Role Type | Idempotent | Only Real Changes | Defaults | commands | TODOs | BUGs | | :-------------------- | :--------: | :---: | :---: | :---: | :---: | :---: | :---: | | cluster-config | install | :heavy_check_mark: | :heavy_check_mark: | | - | - | - | -| config-check | install | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | - | - | - | +| config-check | install | :heavy_check_mark: | :heavy_check_mark: | *under review* | - | - | - | | prereq | install | :heavy_check_mark: | :heavy_check_mark: | **n/a** | - | - | - | -| download | install | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | - | - | - | +| download | install | :heavy_check_mark: | :heavy_check_mark: | *under review* | - | - | - | | raspberrypi | install | :heavy_check_mark: | :heavy_check_mark: | **n/a** | 2 | 1 | - | -| ha/etcd | HA-only | | unknown | :heavy_check_mark: | 3 | 1 | - | -| ha/keepalived | HA-only | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | 1 | 1 | - | -| ha/kube-vip | HA-only | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | 2 | 3 | - | -| k3s/server | install | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | - | - | - | +| ha/etcd | HA-only | | unknown | *under review* | 3 | 1 | - | +| ha/keepalived | HA-only | :heavy_check_mark: | :heavy_check_mark: | *under review* | 1 | 1 | - | +| ha/kube-vip | HA-only | :heavy_check_mark: | :heavy_check_mark: | *under review* | 2 | 3 | - | +| k3s/server | install | :heavy_check_mark: | :heavy_check_mark: | *under review* | - | - | - | | k3s/agent | install | :heavy_check_mark: | :heavy_check_mark: | | - | - | - | -| reset/download | uninstall | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | 2 | 1 | - | +| reset/download | uninstall | :heavy_check_mark: | :heavy_check_mark: | *under review* | 2 | 1 | - | | reset/ha/keepalived | uninstall | :heavy_check_mark: | :heavy_check_mark: | *by ref* | - | - | - | | reset/ha/kube-vip | uninstall | :heavy_check_mark: | :heavy_check_mark: | *by ref* | 3 | 1 | - | -| reset/k3s | uninstall | :heavy_check_mark: | :heavy_check_mark: | :heavy_check_mark: | 9 | 1 | 2 | +| reset/k3s | uninstall | :heavy_check_mark: | :heavy_check_mark: | *under review* | 9 | 1 | 2 | From 9cd3b96edcb0726b8d3ff0df32fb84f3fed2d46c Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 12:17:25 -0500 Subject: [PATCH 103/108] Replaced 'by ref' with 'by reference' Signed-off-by: Jon S. Stumpf --- TODO.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/TODO.md b/TODO.md index 1810c9196..c47b53216 100644 --- a/TODO.md +++ b/TODO.md @@ -72,7 +72,7 @@ Please, try it out. 
| k3s/server | install | :heavy_check_mark: | :heavy_check_mark: | *under review* | - | - | - | | k3s/agent | install | :heavy_check_mark: | :heavy_check_mark: | | - | - | - | | reset/download | uninstall | :heavy_check_mark: | :heavy_check_mark: | *under review* | 2 | 1 | - | -| reset/ha/keepalived | uninstall | :heavy_check_mark: | :heavy_check_mark: | *by ref* | - | - | - | -| reset/ha/kube-vip | uninstall | :heavy_check_mark: | :heavy_check_mark: | *by ref* | 3 | 1 | - | +| reset/ha/keepalived | uninstall | :heavy_check_mark: | :heavy_check_mark: | *by reference* | - | - | - | +| reset/ha/kube-vip | uninstall | :heavy_check_mark: | :heavy_check_mark: | *by reference* | 3 | 1 | - | | reset/k3s | uninstall | :heavy_check_mark: | :heavy_check_mark: | *under review* | 9 | 1 | 2 | From 6d1cf858c638d28331239a1eb6cc3947b4f5b88d Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 12:23:47 -0500 Subject: [PATCH 104/108] Restore turingpi config Signed-off-by: Jon S. Stumpf --- inventory/turingpi/group_vars/all.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/inventory/turingpi/group_vars/all.yml b/inventory/turingpi/group_vars/all.yml index 57be56cf6..29519377e 100644 --- a/inventory/turingpi/group_vars/all.yml +++ b/inventory/turingpi/group_vars/all.yml @@ -1,9 +1,19 @@ --- +# See inventory/sample/group_vars/README.md for more options. +# If this file is empty, default values will be used for all mandatory fields. + # The user that has password-less ssh access to configure your hosts ansible_user: pirate +# The location of where to capture the kube config of the new cluster +# Relative paths are relative to the playbook directory. +cluster_config: cluster.conf + ha_enabled: true ha_cluster_vip: 192.168.140.127 +ha_cluster_method: keepalived +# Use the latest k3s version instead of 'stable' +install_k3s_channel: 'latest' From 1ecdd8f76d2cbfb87576ed50b1168a6af66802b9 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 12:25:12 -0500 Subject: [PATCH 105/108] Restore original turingpi hosts.ini Signed-off-by: Jon S. Stumpf --- inventory/turingpi/hosts.ini | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/inventory/turingpi/hosts.ini b/inventory/turingpi/hosts.ini index 89acfe6c6..bf353fb92 100644 --- a/inventory/turingpi/hosts.ini +++ b/inventory/turingpi/hosts.ini @@ -5,9 +5,9 @@ [k3s_agent] 192.168.140.121 -#192.168.140.122 -#192.168.140.124 -#192.168.140.125 +192.168.140.122 +192.168.140.124 +192.168.140.125 [k3s_cluster:children] k3s_server From a2584604734ee212d2ddaa2783a409e73948824c Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 12:37:36 -0500 Subject: [PATCH 106/108] Added Caveats Signed-off-by: Jon S. Stumpf --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index f77f03d36..4fbc84a91 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,12 @@ Here is what has been tested (:heavy_check_mark:) with *k3s-ansible*. - Hosts in the cluster must have password-less *ssh* access. - HA requires at least three hosts. +## Caveats + +- *k3s-ansible* will overwrite an existing **k3s* installation on the hosts. +- *k3s-ansible* will overwrite the `.kube` directory of the `ansible_user` specified on each server. +- An HA configuration using **keepalived** will overwrite an existing **keepalived** configuration. + ## Usage 1. Create a new cluster definition based on the `inventory/sample` directory. 
From 74f07e00fd77c02010e03e932fa6f71328d8ff24 Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Thu, 30 Dec 2021 12:39:55 -0500 Subject: [PATCH 107/108] Fixed formatting inconsistencies in README.md Signed-off-by: Jon S. Stumpf --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 4fbc84a91..c2c64216c 100644 --- a/README.md +++ b/README.md @@ -38,9 +38,9 @@ Here is what has been tested (:heavy_check_mark:) with *k3s-ansible*. ## Caveats -- *k3s-ansible* will overwrite an existing **k3s* installation on the hosts. +- *k3s-ansible* will overwrite an existing *k3s* installation on the hosts. - *k3s-ansible* will overwrite the `.kube` directory of the `ansible_user` specified on each server. -- An HA configuration using **keepalived** will overwrite an existing **keepalived** configuration. +- An HA configuration using *keepalived* will overwrite an existing *keepalived* configuration. ## Usage From b15fb8bec0c28e1e2c789ad1755a7835b107915b Mon Sep 17 00:00:00 2001 From: "Jon S. Stumpf" Date: Sun, 9 Jan 2022 13:07:16 -0500 Subject: [PATCH 108/108] Changed default HA cluster method to kube-vip Signed-off-by: Jon S. Stumpf --- inventory/turingpi/group_vars/all.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory/turingpi/group_vars/all.yml b/inventory/turingpi/group_vars/all.yml index 29519377e..41c22cfb7 100644 --- a/inventory/turingpi/group_vars/all.yml +++ b/inventory/turingpi/group_vars/all.yml @@ -12,7 +12,7 @@ cluster_config: cluster.conf ha_enabled: true ha_cluster_vip: 192.168.140.127 -ha_cluster_method: keepalived +ha_cluster_method: kube-vip # Use the latest k3s version instead of 'stable' install_k3s_channel: 'latest'
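Since the example inventory now defaults to *kube-vip*, the "download the latest version of *kube-vip*" TODO becomes more relevant. One possible shape is sketched below; none of it exists in the roles yet, the GitHub repository path and the `kube_vip_version` / `kube_vip_release` names are assumptions, and the pinned v0.4.0 would remain the fallback for offline or rate-limited controllers.

```yaml
# Sketch only: resolve the newest kube-vip tag instead of the pinned v0.4.0.
- name: Look up the latest kube-vip release tag
  uri:
    url: https://api.github.com/repos/kube-vip/kube-vip/releases/latest
    return_content: yes
  delegate_to: localhost
  become: no
  run_once: true
  register: kube_vip_release

- name: Prefer an explicitly pinned version, otherwise use the latest tag
  set_fact:
    kube_vip_version: "{{ kube_vip_version | default(kube_vip_release.json.tag_name) }}"
```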