diff --git a/docs/book/src/developers/development.md b/docs/book/src/developers/development.md
index 0091334a6..e68643256 100644
--- a/docs/book/src/developers/development.md
+++ b/docs/book/src/developers/development.md
@@ -124,25 +124,13 @@ Here is a list of required configuration parameters:
 ```bash
 # Cluster settings
 export CLUSTER_NAME=capl-cluster
-export KUBERNETES_VERSION=1.29.1
-export LINODE_REGION=us-ord
+export KUBERNETES_VERSION=v1.29.1
 
 # Linode settings
+export LINODE_REGION=us-ord
 export LINODE_TOKEN=
-
-# Machine settings
-export LINODE_OS=linode/ubuntu22.04
 export LINODE_CONTROL_PLANE_MACHINE_TYPE=g6-standard-2
 export LINODE_MACHINE_TYPE=g6-standard-2
-export CONTROL_PLANE_MACHINE_COUNT=3
-export WORKER_MACHINE_COUNT=3
-
-# Generate SSH key.
-# If you want to provide your own key, skip this step and set LINODE_SSH_KEY to your existing public key.
-SSH_KEY_FILE=.linodeSSHkey
-rm -f "${SSH_KEY_FILE}"
-ssh-keygen -t rsa -b 4096 -f "${SSH_KEY_FILE}" -N '' 1>/dev/null
-export LINODE_SSH_KEY="$(cat "${SSH_KEY_FILE}.pub)"
 ```
 
 ~~~admonish tip
@@ -158,6 +146,11 @@ clusterctl generate cluster $CLUSTER_NAME --from ./templates/cluster-template.ya
 Please note the templates require the use of `clusterctl generate` to substitute the environment variables properly.
 ```
 
+~~~admonish note
+If you want SSH access to the nodes (e.g. for troubleshooting), use `./templates/cluster-template-ssh.yaml` instead,
+which will add the public key in `LINODE_SSH_PUBKEY` to the authorized keys on each node.
+~~~
+
 #### Creating the workload cluster
 
 Once you have all the necessary environment variables set,
diff --git a/templates/cluster-template-ssh.yaml b/templates/cluster-template-ssh.yaml
new file mode 100644
index 000000000..e0ffff828
--- /dev/null
+++ b/templates/cluster-template-ssh.yaml
@@ -0,0 +1,286 @@
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: Cluster
+metadata:
+  name: ${CLUSTER_NAME}
+  labels:
+    cni: cilium
+    ccm: linode
+    crs: ${CLUSTER_NAME}-crs
+spec:
+  clusterNetwork:
+    pods:
+      cidrBlocks:
+        - 192.168.128.0/17
+  controlPlaneRef:
+    apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+    kind: KubeadmControlPlane
+    name: ${CLUSTER_NAME}-control-plane
+  infrastructureRef:
+    apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+    kind: LinodeCluster
+    name: ${CLUSTER_NAME}
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: LinodeCluster
+metadata:
+  name: ${CLUSTER_NAME}
+spec:
+  region: ${LINODE_REGION}
+---
+kind: KubeadmControlPlane
+apiVersion: controlplane.cluster.x-k8s.io/v1beta1
+metadata:
+  name: ${CLUSTER_NAME}-control-plane
+spec:
+  replicas: ${CONTROL_PLANE_MACHINE_COUNT}
+  machineTemplate:
+    infrastructureRef:
+      kind: LinodeMachineTemplate
+      apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+      name: ${CLUSTER_NAME}-control-plane
+  kubeadmConfigSpec:
+    files:
+      - path: /etc/containerd/config.toml
+        contentFrom:
+          secret:
+            name: common-init-files
+            key: containerd-config.toml
+      - path: /etc/modules-load.d/k8s.conf
+        contentFrom:
+          secret:
+            name: common-init-files
+            key: k8s-modules.conf
+      - path: /etc/sysctl.d/k8s.conf
+        contentFrom:
+          secret:
+            name: common-init-files
+            key: sysctl-k8s.conf
+      - path: /kubeadm-pre-init.sh
+        contentFrom:
+          secret:
+            name: common-init-files
+            key: kubeadm-pre-init.sh
+        permissions: "0500"
+    preKubeadmCommands:
+      - /kubeadm-pre-init.sh '{{ ds.meta_data.label }}' ${KUBERNETES_VERSION}
+    clusterConfiguration:
+      apiServer:
+        extraArgs:
+          cloud-provider: external
+      controllerManager:
+        extraArgs:
+          cloud-provider: external
+    initConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          cloud-provider: external
+          provider-id: 'linode://{{ ds.meta_data.id }}'
+        name: '{{ ds.meta_data.label }}'
+    joinConfiguration:
+      nodeRegistration:
+        kubeletExtraArgs:
+          cloud-provider: external
+          provider-id: 'linode://{{ ds.meta_data.id }}'
+        name: '{{ ds.meta_data.label }}'
+  version: "${KUBERNETES_VERSION}"
+---
+kind: LinodeMachineTemplate
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+metadata:
+  name: ${CLUSTER_NAME}-control-plane
+spec:
+  template:
+    spec:
+      image: ${LINODE_OS:="linode/ubuntu22.04"}
+      type: ${LINODE_CONTROL_PLANE_MACHINE_TYPE}
+      region: ${LINODE_REGION}
+      authorizedKeys:
+        - ${LINODE_SSH_PUBKEY}
+---
+apiVersion: cluster.x-k8s.io/v1beta1
+kind: MachineDeployment
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+spec:
+  clusterName: ${CLUSTER_NAME}
+  replicas: ${WORKER_MACHINE_COUNT}
+  selector:
+    matchLabels:
+  template:
+    spec:
+      clusterName: ${CLUSTER_NAME}
+      version: "${KUBERNETES_VERSION}"
+      bootstrap:
+        configRef:
+          name: ${CLUSTER_NAME}-md-0
+          apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+          kind: KubeadmConfigTemplate
+      infrastructureRef:
+        name: ${CLUSTER_NAME}-md-0
+        apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+        kind: LinodeMachineTemplate
+---
+apiVersion: infrastructure.cluster.x-k8s.io/v1alpha1
+kind: LinodeMachineTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+spec:
+  template:
+    spec:
+      image: ${LINODE_OS:="linode/ubuntu22.04"}
+      type: ${LINODE_MACHINE_TYPE}
+      region: ${LINODE_REGION}
+      authorizedKeys:
+        - ${LINODE_SSH_PUBKEY}
+---
+apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
+kind: KubeadmConfigTemplate
+metadata:
+  name: ${CLUSTER_NAME}-md-0
+spec:
+  template:
+    spec:
+      files:
+        - path: /etc/containerd/config.toml
+          contentFrom:
+            secret:
+              name: common-init-files
+              key: containerd-config.toml
+        - path: /etc/modules-load.d/k8s.conf
+          contentFrom:
+            secret:
+              name: common-init-files
+              key: k8s-modules.conf
+        - path: /etc/sysctl.d/k8s.conf
+          contentFrom:
+            secret:
+              name: common-init-files
+              key: sysctl-k8s.conf
+        - path: /kubeadm-pre-init.sh
+          contentFrom:
+            secret:
+              name: common-init-files
+              key: kubeadm-pre-init.sh
+          permissions: "0500"
+      preKubeadmCommands:
+        - /kubeadm-pre-init.sh '{{ ds.meta_data.label }}' ${KUBERNETES_VERSION}
+      joinConfiguration:
+        nodeRegistration:
+          kubeletExtraArgs:
+            cloud-provider: external
+            provider-id: 'linode://{{ ds.meta_data.id }}'
+          name: '{{ ds.meta_data.label }}'
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: common-init-files
+stringData:
+  containerd-config.toml: |
+    version = 2
+    imports = ["/etc/containerd/conf.d/*.toml"]
+    [plugins]
+      [plugins."io.containerd.grpc.v1.cri"]
+        sandbox_image = "registry.k8s.io/pause:3.9"
+        [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc]
+          runtime_type = "io.containerd.runc.v2"
+          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
+            SystemdCgroup = true
+  k8s-modules.conf: |
+    overlay
+    br_netfilter
+  sysctl-k8s.conf: |
+    net.bridge.bridge-nf-call-iptables = 1
+    net.bridge.bridge-nf-call-ip6tables = 1
+    net.ipv4.ip_forward = 1
+  kubeadm-pre-init.sh: |
+    #!/bin/bash
+    set -euo pipefail
+    export DEBIAN_FRONTEND=noninteractive
+    hostnamectl set-hostname "$1" && hostname -F /etc/hostname
+    mkdir -p -m 755 /etc/apt/keyrings
+    PATCH_VERSION=$${2#[v]}
+    VERSION=$${PATCH_VERSION%.*}
+    curl -fsSL "https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key" | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
+    echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
+    apt-get update -y
+    apt-get install -y kubelet=$PATCH_VERSION* kubeadm=$PATCH_VERSION* kubectl=$PATCH_VERSION* containerd
+    apt-mark hold kubelet kubeadm kubectl containerd
+    modprobe overlay
+    modprobe br_netfilter
+    sysctl --system
+    sed -i '/swap/d' /etc/fstab
+    swapoff -a
+---
+apiVersion: v1
+kind: Secret
+type: addons.cluster.x-k8s.io/resource-set
+metadata:
+  name: linode-${CLUSTER_NAME}-crs-0
+stringData:
+  linode-token-region.yaml: |-
+    kind: Secret
+    apiVersion: v1
+    metadata:
+      name: linode-token-region
+      namespace: kube-system
+    stringData:
+      apiToken: ${LINODE_TOKEN}
+      region: ${LINODE_REGION}
+---
+apiVersion: addons.cluster.x-k8s.io/v1beta1
+kind: ClusterResourceSet
+metadata:
+  name: ${CLUSTER_NAME}-crs-0
+spec:
+  clusterSelector:
+    matchLabels:
+      crs: ${CLUSTER_NAME}-crs
+  resources:
+    - kind: Secret
+      name: linode-${CLUSTER_NAME}-crs-0
+  strategy: ApplyOnce
+---
+apiVersion: addons.cluster.x-k8s.io/v1alpha1
+kind: HelmChartProxy
+metadata:
+  name: cilium
+spec:
+  clusterSelector:
+    matchLabels:
+      cni: cilium
+  repoURL: https://helm.cilium.io/
+  chartName: cilium
+  version: 1.15.0
+  options:
+    waitForJobs: true
+    wait: true
+    timeout: 5m
+  valuesTemplate: |
+    hubble:
+      relay:
+        enabled: true
+      ui:
+        enabled: true
+---
+apiVersion: addons.cluster.x-k8s.io/v1alpha1
+kind: HelmChartProxy
+metadata:
+  name: linode-cloud-controller-manager
+spec:
+  clusterSelector:
+    matchLabels:
+      ccm: linode
+  repoURL: https://linode.github.io/linode-cloud-controller-manager/
+  chartName: ccm-linode
+  namespace: kube-system
+  version: v0.3.24
+  options:
+    waitForJobs: true
+    wait: true
+    timeout: 5m
+  valuesTemplate: |
+    secretRef:
+      name: "linode-token-region"
+    image:
+      pullPolicy: IfNotPresent
diff --git a/templates/cluster-template.yaml b/templates/cluster-template.yaml
index df156df6f..ccc13fd43 100644
--- a/templates/cluster-template.yaml
+++ b/templates/cluster-template.yaml
@@ -94,8 +94,6 @@ spec:
       image: ${LINODE_OS:="linode/ubuntu22.04"}
       type: ${LINODE_CONTROL_PLANE_MACHINE_TYPE}
       region: ${LINODE_REGION}
-      authorizedKeys:
-        - ${LINODE_SSH_KEY}
 ---
 apiVersion: cluster.x-k8s.io/v1beta1
 kind: MachineDeployment
@@ -130,8 +128,6 @@ spec:
       image: ${LINODE_OS:="linode/ubuntu22.04"}
       type: ${LINODE_MACHINE_TYPE}
       region: ${LINODE_REGION}
-      authorizedKeys:
-        - ${LINODE_SSH_KEY}
 ---
 apiVersion: bootstrap.cluster.x-k8s.io/v1beta1
 kind: KubeadmConfigTemplate
@@ -199,12 +195,12 @@ stringData:
     export DEBIAN_FRONTEND=noninteractive
     hostnamectl set-hostname "$1" && hostname -F /etc/hostname
     mkdir -p -m 755 /etc/apt/keyrings
-    VERSION=$${2#[v]}
-    VERSION=$${VERSION%.*}
+    PATCH_VERSION=$${2#[v]}
+    VERSION=$${PATCH_VERSION%.*}
     curl -fsSL "https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/Release.key" | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
     echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v$VERSION/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
     apt-get update -y
-    apt-get install -y kubelet=$2* kubeadm=$2* kubectl=$2* containerd
+    apt-get install -y kubelet=$PATCH_VERSION* kubeadm=$PATCH_VERSION* kubectl=$PATCH_VERSION* containerd
     apt-mark hold kubelet kubeadm kubectl containerd
     modprobe overlay
     modprobe br_netfilter
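
A minimal sketch of how the SSH-enabled template might be used, assuming the environment variables from the docs snippet above are already exported; the public key path and the `kubectl apply` pipe here are illustrative assumptions, not part of the template itself:

```bash
# Provide the public key that the template injects into each node's authorizedKeys
# (path is only an example; use any existing public key).
export LINODE_SSH_PUBKEY="$(cat ~/.ssh/id_ed25519.pub)"

# Render the SSH-enabled template and apply it to the management cluster.
clusterctl generate cluster $CLUSTER_NAME \
  --from ./templates/cluster-template-ssh.yaml \
  | kubectl apply -f -
```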