From 851544756417a2a4342770ecf71fd0d31622841f Mon Sep 17 00:00:00 2001 From: Manuel Buil Date: Wed, 27 Mar 2024 08:33:42 +0100 Subject: [PATCH] Organize network docs differently Signed-off-by: Manuel Buil --- docs/installation/installation.md | 2 +- .../basic-network-options.md} | 90 +-------- .../network-options/distributed-multicloud.md | 84 ++++++++ .../network-options/multus-ipams.md | 75 +++++++ .../network-options/network-options.md | 12 ++ docs/networking.md | 2 +- .../current/installation/installation.md | 2 +- .../basic-network-options.md} | 90 +-------- .../network-options/distributed-multicloud.md | 84 ++++++++ .../network-options/multus-ipams.md | 75 +++++++ .../network-options/network-options.md | 12 ++ .../current/installation/installation.md | 2 +- .../network-options/basic-network-options.md | 185 ++++++++++++++++++ .../network-options/distributed-multicloud.md | 84 ++++++++ .../network-options/multus-ipams.md | 75 +++++++ .../network-options/network-options.md | 12 ++ .../current/networking.md | 2 +- package.json | 4 +- sidebars.js | 11 +- yarn.lock | 4 +- 20 files changed, 725 insertions(+), 182 deletions(-) rename docs/installation/{network-options.md => network-options/basic-network-options.md} (71%) create mode 100644 docs/installation/network-options/distributed-multicloud.md create mode 100644 docs/installation/network-options/multus-ipams.md create mode 100644 docs/installation/network-options/network-options.md rename i18n/kr/docusaurus-plugin-content-docs/current/installation/{network-options.md => network-options/basic-network-options.md} (71%) create mode 100644 i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/distributed-multicloud.md create mode 100644 i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/multus-ipams.md create mode 100644 i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/network-options.md create mode 100644 i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/basic-network-options.md create mode 100644 i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/distributed-multicloud.md create mode 100644 i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/multus-ipams.md create mode 100644 i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/network-options.md diff --git a/docs/installation/installation.md b/docs/installation/installation.md index 279b5266e..51caedf18 100644 --- a/docs/installation/installation.md +++ b/docs/installation/installation.md @@ -7,7 +7,7 @@ This section contains instructions for installing K3s in various environments. P [Configuration Options](configuration.md) provides guidance on the options available to you when installing K3s. -[Network Options](network-options.md) provides guidance on the networking options available in k3s. +[Network Options](network-options/network-options.md) provides guidance on the networking options available in k3s. [Private Registry Configuration](private-registry.md) covers use of `registries.yaml` to configure container image registry mirrors. 
diff --git a/docs/installation/network-options.md b/docs/installation/network-options/basic-network-options.md
similarity index 71%
rename from docs/installation/network-options.md
rename to docs/installation/network-options/basic-network-options.md
index d4689529a..a14fedcde 100644
--- a/docs/installation/network-options.md
+++ b/docs/installation/network-options/basic-network-options.md
@@ -1,11 +1,11 @@
 ---
-title: "Network Options"
+title: "Basic Network Options"
 weight: 25
 ---
 
 This page describes K3s network configuration options, including configuration or replacement of Flannel, and configuring IPv6.
 
-> **Note:** Please reference the [Networking](../networking.md) page for information about CoreDNS, Traefik, and the Service LB.
+> **Note:** Please reference the [Networking](../../networking.md) page for information about CoreDNS, Traefik, and the Service LB.
 
 ## Flannel Options
 
@@ -13,7 +13,7 @@ This page describes K3s network configuration options, including configuration o
 * Flannel options can only be set on server nodes, and must be identical on all servers in the cluster.
 * The default backend for Flannel is `vxlan`. To enable encryption, use the `wireguard-native` backend.
-* Using `vxlan` on Rasperry Pi with recent versions of Ubuntu requires [additional preparation](./requirements.md?os=pi#operating-systems).
+* Using `vxlan` on Raspberry Pi with recent versions of Ubuntu requires [additional preparation](../requirements.md?os=pi#operating-systems).
 * Using `wireguard-native` as the Flannel backend may require additional modules on some Linux distributions. Please see the [WireGuard Install Guide](https://www.wireguard.com/install/) for details.
   The WireGuard install steps will ensure the appropriate kernel modules are installed for your operating system.
   You must ensure that WireGuard kernel modules are available on every node, both servers and agents, before attempting to use the WireGuard Flannel backend.
@@ -120,7 +120,7 @@ K3s agents and servers maintain websocket tunnels between nodes that are used to
 This allows agents to operate without exposing the kubelet and container runtime streaming ports to incoming connections, and for the control-plane to connect to cluster services when operating with the agent disabled.
 This functionality is equivalent to the [Konnectivity](https://kubernetes.io/docs/tasks/extend-kubernetes/setup-konnectivity/) service commonly used on other Kubernetes distributions, and is managed via the apiserver's egress selector configuration.
 
-The default mode is `agent`. `pod` or `cluster` modes are recommended when running [agentless servers](../advanced.md#running-agentless-servers-experimental), in order to provide the apiserver with access to cluster service endpoints in the absence of flannel and kube-proxy.
+The default mode is `agent`. `pod` or `cluster` modes are recommended when running [agentless servers](../../advanced.md#running-agentless-servers-experimental), in order to provide the apiserver with access to cluster service endpoints in the absence of flannel and kube-proxy.
 
 The egress selector mode may be configured on servers via the `--egress-selector-mode` flag, and offers four modes:
 * `disabled`: The apiserver does not use agent tunnels to communicate with kubelets or cluster endpoints.
@@ -183,85 +183,3 @@ Single-stack IPv6 clusters (clusters without IPv4) are supported on K3s using th
 ```bash
 --cluster-cidr=2001:cafe:42::/56 --service-cidr=2001:cafe:43::/112
 ```
-
-## Distributed hybrid or multicloud cluster
-
-A K3s cluster can still be deployed on nodes which do not share a common private network and are not directly connected (e.g. nodes in different public clouds). There are two options to achieve this: the embedded k3s multicloud solution and the integration with the `tailscale` VPN provider.
-
-:::warning
-The latency between nodes will increase as external connectivity requires more hops. This will reduce the network performance and could also impact the health of the cluster if latency is too high.
-:::
-
-:::warning
-Embedded etcd is not supported in this type of deployment. If using embedded etcd, all server nodes must be reachable to each other via their private IPs. Agents may be distributed over multiple networks, but all servers should be in the same location.
-:::
-
-### Embedded k3s multicloud solution
-
-K3s uses wireguard to establish a VPN mesh for cluster traffic. Nodes must each have a unique IP through which they can be reached (usually a public IP). K3s supervisor traffic will use a websocket tunnel, and cluster (CNI) traffic will use a wireguard tunnel.
-
-To enable this type of deployment, you must add the following parameters on servers:
-```bash
---node-external-ip=<SERVER_EXTERNAL_IP> --flannel-backend=wireguard-native --flannel-external-ip
-```
-and on agents:
-```bash
---node-external-ip=<AGENT_EXTERNAL_IP>
-```
-
-where `SERVER_EXTERNAL_IP` is the IP through which we can reach the server node and `AGENT_EXTERNAL_IP` is the IP through which we can reach the agent node. Note that the `K3S_URL` config parameter in the agent should use the `SERVER_EXTERNAL_IP` to be able to connect to it. Remember to check the [Networking Requirements](../installation/requirements.md#networking) and allow access to the listed ports on both internal and external addresses.
-
-Both `SERVER_EXTERNAL_IP` and `AGENT_EXTERNAL_IP` must have connectivity between them and are normally public IPs.
-
-:::info Dynamic IPs
-If nodes are assigned dynamic IPs and the IP changes (e.g. in AWS), you must modify the `--node-external-ip` parameter to reflect the new IP. If running K3s as a service, you must modify `/etc/systemd/system/k3s.service` then run:
-
-```bash
-systemctl daemon-reload
-systemctl restart k3s
-```
-:::
-
-### Integration with the Tailscale VPN provider (experimental)
-
-Available in v1.27.3, v1.26.6, v1.25.11 and newer.
-
-K3s can integrate with [Tailscale](https://tailscale.com/) so that nodes use the Tailscale VPN service to build a mesh between nodes.
-
-There are four steps to be done with Tailscale before deploying K3s:
-
-1. Log in to your Tailscale account
-
-2. In `Settings > Keys`, generate an auth key ($AUTH-KEY), which may be reusable for all nodes in your cluster
-
-3. Decide on the podCIDR the cluster will use (by default `10.42.0.0/16`). Append the CIDR (or CIDRs for dual-stack) in Access controls with the stanza:
-```yaml
-"autoApprovers": {
-    "routes": {
-      "10.42.0.0/16": ["your_account@xyz.com"],
-      "2001:cafe:42::/56": ["your_account@xyz.com"],
-    },
-  },
-```
-
-4.
Install Tailscale in your nodes:
-```bash
-curl -fsSL https://tailscale.com/install.sh | sh
-```
-
-To deploy K3s with Tailscale integration enabled, you must add the following parameter on each of your nodes:
-```bash
---vpn-auth="name=tailscale,joinKey=$AUTH-KEY
-```
-or provide that information in a file and use the parameter:
-```bash
---vpn-auth-file=$PATH_TO_FILE
-```
-
-Optionally, if you have your own Tailscale server (e.g. headscale), you can connect to it by appending `,controlServerURL=$URL` to the vpn-auth parameters
-
-:::warning
-
-If you plan on running several K3s clusters using the same tailscale network, please create appropriate [ACLs](https://tailscale.com/kb/1018/acls/) to avoid IP conflicts or use different podCIDR subnets for each cluster.
-
-:::
diff --git a/docs/installation/network-options/distributed-multicloud.md b/docs/installation/network-options/distributed-multicloud.md
new file mode 100644
index 000000000..ce9a06251
--- /dev/null
+++ b/docs/installation/network-options/distributed-multicloud.md
@@ -0,0 +1,84 @@
+---
+title: "Distributed hybrid or multicloud cluster"
+weight: 25
+---
+
+A K3s cluster can still be deployed on nodes which do not share a common private network and are not directly connected (e.g. nodes in different public clouds). There are two options to achieve this: the embedded k3s multicloud solution and the integration with the `tailscale` VPN provider.
+
+:::warning
+The latency between nodes will increase as external connectivity requires more hops. This will reduce the network performance and could also impact the health of the cluster if latency is too high.
+:::
+
+:::warning
+Embedded etcd is not supported in this type of deployment. If using embedded etcd, all server nodes must be reachable to each other via their private IPs. Agents may be distributed over multiple networks, but all servers should be in the same location.
+:::
+
+### Embedded k3s multicloud solution
+
+K3s uses wireguard to establish a VPN mesh for cluster traffic. Nodes must each have a unique IP through which they can be reached (usually a public IP). K3s supervisor traffic will use a websocket tunnel, and cluster (CNI) traffic will use a wireguard tunnel.
+
+To enable this type of deployment, you must add the following parameters on servers:
+```bash
+--node-external-ip=<SERVER_EXTERNAL_IP> --flannel-backend=wireguard-native --flannel-external-ip
+```
+and on agents:
+```bash
+--node-external-ip=<AGENT_EXTERNAL_IP>
+```
+
+where `SERVER_EXTERNAL_IP` is the IP through which we can reach the server node and `AGENT_EXTERNAL_IP` is the IP through which we can reach the agent node. Note that the `K3S_URL` config parameter in the agent should use the `SERVER_EXTERNAL_IP` to be able to connect to it. Remember to check the [Networking Requirements](../../installation/requirements.md#networking) and allow access to the listed ports on both internal and external addresses.
+
+Both `SERVER_EXTERNAL_IP` and `AGENT_EXTERNAL_IP` must have connectivity between them and are normally public IPs.
+
+:::info Dynamic IPs
+If nodes are assigned dynamic IPs and the IP changes (e.g. in AWS), you must modify the `--node-external-ip` parameter to reflect the new IP. If running K3s as a service, you must modify `/etc/systemd/system/k3s.service` then run:
+
+```bash
+systemctl daemon-reload
+systemctl restart k3s
+```
+:::
+
+### Integration with the Tailscale VPN provider (experimental)
+
+Available in v1.27.3, v1.26.6, v1.25.11 and newer.
+
+K3s can integrate with [Tailscale](https://tailscale.com/) so that nodes use the Tailscale VPN service to build a mesh between nodes.
+
+There are four steps to be done with Tailscale before deploying K3s:
+
+1. Log in to your Tailscale account
+
+2. In `Settings > Keys`, generate an auth key ($AUTH-KEY), which may be reusable for all nodes in your cluster
+
+3. Decide on the podCIDR the cluster will use (by default `10.42.0.0/16`). Append the CIDR (or CIDRs for dual-stack) in Access controls with the stanza:
+```yaml
+"autoApprovers": {
+    "routes": {
+      "10.42.0.0/16": ["your_account@xyz.com"],
+      "2001:cafe:42::/56": ["your_account@xyz.com"],
+    },
+  },
+```
+
+4. Install Tailscale on your nodes:
+```bash
+curl -fsSL https://tailscale.com/install.sh | sh
+```
+
+To deploy K3s with Tailscale integration enabled, you must add the following parameter on each of your nodes:
+```bash
+--vpn-auth="name=tailscale,joinKey=$AUTH-KEY"
+```
+or provide that information in a file and use the parameter:
+```bash
+--vpn-auth-file=$PATH_TO_FILE
+```
+
+Optionally, if you have your own Tailscale server (e.g. headscale), you can connect to it by appending `,controlServerURL=$URL` to the vpn-auth parameters.
+
+:::warning
+
+If you plan on running several K3s clusters using the same tailscale network, please create appropriate [ACLs](https://tailscale.com/kb/1018/acls/) to avoid IP conflicts or use different podCIDR subnets for each cluster.
+
+:::
diff --git a/docs/installation/network-options/multus-ipams.md b/docs/installation/network-options/multus-ipams.md
new file mode 100644
index 000000000..86428bba2
--- /dev/null
+++ b/docs/installation/network-options/multus-ipams.md
@@ -0,0 +1,75 @@
+---
+title: "Multus and IPAM plugins"
+weight: 25
+---
+
+[Multus CNI](https://github.com/k8snetworkplumbingwg/multus-cni) is a CNI plugin that enables attaching multiple network interfaces to pods. Multus does not replace CNI plugins; instead, it acts as a CNI plugin multiplexer. Multus is useful in certain use cases, especially when pods are network intensive and require extra network interfaces that support dataplane acceleration techniques such as SR-IOV.
+
+Multus cannot be deployed standalone. It always requires at least one conventional CNI plugin that fulfills the Kubernetes cluster network requirements. That CNI plugin becomes the default for Multus, and will be used to provide the primary interface for all pods. When deploying K3s with default options, that CNI plugin is Flannel.
+
+To deploy Multus, we recommend using the following helm repo:
+```
+helm repo add rke2-charts https://rke2-charts.rancher.io
+helm repo update
+```
+
+Then, create a config file that sets the necessary configuration for Multus to work. The configuration depends on the IPAM plugin to be used, i.e. how the extra Multus interfaces of your pods will get their IPs. There are three options: host-local, DHCP Daemon and whereabouts:
+
+
+
+The host-local IPAM plugin allocates IP addresses out of a set of address ranges. It stores the state locally on the host filesystem, hence ensuring uniqueness of IP addresses on a single host. Therefore, we don't recommend it for multi-node clusters. This IPAM plugin does not require any extra deployment. For more information: https://www.cni.dev/plugins/current/ipam/host-local/.
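+
+For context, after Multus is installed (see the installation steps below), pods request extra interfaces through `NetworkAttachmentDefinition` objects whose `config` embeds the chosen IPAM plugin. A minimal sketch using host-local, where the attachment name, `eth0` master interface, and subnet are illustrative assumptions rather than values required by K3s:
+```yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+  name: macvlan-hostlocal        # hypothetical attachment name
+spec:
+  config: '{
+      "cniVersion": "0.3.1",
+      "type": "macvlan",
+      "master": "eth0",
+      "mode": "bridge",
+      "ipam": {
+        "type": "host-local",
+        "subnet": "192.168.1.0/24",
+        "rangeStart": "192.168.1.200",
+        "rangeEnd": "192.168.1.216"
+      }
+    }'
+```
+A pod then selects this attachment with the annotation `k8s.v1.cni.cncf.io/networks: macvlan-hostlocal`, and host-local tracks the assigned IPs on each node's filesystem.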
+
+To use the host-local plugin, please create a file called `multus-values.yaml` with the following content:
+```
+config:
+  cni_conf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+    kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig
+```
+
+
+
+[Whereabouts](https://github.com/k8snetworkplumbingwg/whereabouts) is an IP Address Management (IPAM) CNI plugin that assigns IP addresses cluster-wide.
+
+To use the Whereabouts IPAM plugin, please create a file called `multus-values.yaml` with the following content:
+```
+config:
+  cni_conf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+    kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig
+rke2-whereabouts:
+  fullnameOverride: whereabouts
+  enabled: true
+  cniConf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+```
+
+
+
+The dhcp IPAM plugin can be deployed when there is already a DHCP server running on the network. In this case, a DHCP daemonset is deployed to take care of periodically renewing the DHCP lease. For more information, please check the official docs of the [DHCP IPAM plugin](https://www.cni.dev/plugins/current/ipam/dhcp/).
+
+To use this DHCP plugin, please create a file called `multus-values.yaml` with the following content:
+```
+config:
+  cni_conf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+    kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig
+manifests:
+  dhcpDaemonSet: true
+```
+
+
+
+
+After creating the `multus-values.yaml` file, everything is ready to install Multus:
+```
+helm install multus rke2-charts/rke2-multus -n kube-system --kubeconfig /etc/rancher/k3s/k3s.yaml --values multus-values.yaml
+```
+
+That will create a daemonset called `multus` which will deploy Multus and all regular CNI binaries in `/var/lib/rancher/k3s/data/current/` (e.g. macvlan) and the correct Multus config in `/var/lib/rancher/k3s/agent/etc/cni/net.d`.
+
+For more information about Multus, refer to the [multus-cni](https://github.com/k8snetworkplumbingwg/multus-cni/tree/master/docs) documentation.
diff --git a/docs/installation/network-options/network-options.md b/docs/installation/network-options/network-options.md
new file mode 100644
index 000000000..a478dc330
--- /dev/null
+++ b/docs/installation/network-options/network-options.md
@@ -0,0 +1,12 @@
+---
+title: "Network options"
+weight: 20
+---
+
+This section contains instructions for configuring networking in K3s.
+
+[Basic Network Options](basic-network-options.md) covers the basic networking configuration of the cluster, such as flannel and single/dual-stack configurations.
+
+[Hybrid/Multicloud cluster](distributed-multicloud.md) provides guidance on the options available to span the K3s cluster over remote or hybrid nodes.
+
+[Multus and IPAM plugins](multus-ipams.md) provides guidance on leveraging Multus in K3s in order to have multiple interfaces per pod.
diff --git a/docs/networking.md b/docs/networking.md
index c207c5133..5140f6a5e 100644
--- a/docs/networking.md
+++ b/docs/networking.md
@@ -5,7 +5,7 @@ weight: 35
 
 This page explains how CoreDNS, Traefik Ingress controller, Network Policy controller, and ServiceLB load balancer controller work within K3s.
-Refer to the [Installation Network Options](./installation/network-options.md) page for details on Flannel configuration options and backend selection, or how to set up your own CNI.
+Refer to the [Installation Network Options](./installation/network-options/network-options.md) page for details on Flannel configuration options and backend selection, or how to set up your own CNI.
 
 For information on which ports need to be opened for K3s, refer to the [Networking Requirements](./installation/requirements.md#networking).
 
diff --git a/i18n/kr/docusaurus-plugin-content-docs/current/installation/installation.md b/i18n/kr/docusaurus-plugin-content-docs/current/installation/installation.md
index 279b5266e..51caedf18 100644
--- a/i18n/kr/docusaurus-plugin-content-docs/current/installation/installation.md
+++ b/i18n/kr/docusaurus-plugin-content-docs/current/installation/installation.md
@@ -7,7 +7,7 @@ This section contains instructions for installing K3s in various environments. P
 
 [Configuration Options](configuration.md) provides guidance on the options available to you when installing K3s.
 
-[Network Options](network-options.md) provides guidance on the networking options available in k3s.
+[Network Options](network-options/network-options.md) provides guidance on the networking options available in k3s.
 
 [Private Registry Configuration](private-registry.md) covers use of `registries.yaml` to configure container image registry mirrors.
 
diff --git a/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options.md b/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/basic-network-options.md
similarity index 71%
rename from i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options.md
rename to i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/basic-network-options.md
index d4689529a..a14fedcde 100644
--- a/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options.md
+++ b/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/basic-network-options.md
@@ -1,11 +1,11 @@
 ---
-title: "Network Options"
+title: "Basic Network Options"
 weight: 25
 ---
 
 This page describes K3s network configuration options, including configuration or replacement of Flannel, and configuring IPv6.
 
-> **Note:** Please reference the [Networking](../networking.md) page for information about CoreDNS, Traefik, and the Service LB.
+> **Note:** Please reference the [Networking](../../networking.md) page for information about CoreDNS, Traefik, and the Service LB.
 
 ## Flannel Options
 
@@ -13,7 +13,7 @@ This page describes K3s network configuration options, including configuration o
 * Flannel options can only be set on server nodes, and must be identical on all servers in the cluster.
 * The default backend for Flannel is `vxlan`. To enable encryption, use the `wireguard-native` backend.
-* Using `vxlan` on Rasperry Pi with recent versions of Ubuntu requires [additional preparation](./requirements.md?os=pi#operating-systems).
+* Using `vxlan` on Raspberry Pi with recent versions of Ubuntu requires [additional preparation](../requirements.md?os=pi#operating-systems).
 * Using `wireguard-native` as the Flannel backend may require additional modules on some Linux distributions. Please see the [WireGuard Install Guide](https://www.wireguard.com/install/) for details.
   The WireGuard install steps will ensure the appropriate kernel modules are installed for your operating system.
  You must ensure that WireGuard kernel modules are available on every node, both servers and agents, before attempting to use the WireGuard Flannel backend.
 
@@ -120,7 +120,7 @@ K3s agents and servers maintain websocket tunnels between nodes that are used to
 This allows agents to operate without exposing the kubelet and container runtime streaming ports to incoming connections, and for the control-plane to connect to cluster services when operating with the agent disabled.
 This functionality is equivalent to the [Konnectivity](https://kubernetes.io/docs/tasks/extend-kubernetes/setup-konnectivity/) service commonly used on other Kubernetes distributions, and is managed via the apiserver's egress selector configuration.
 
-The default mode is `agent`. `pod` or `cluster` modes are recommended when running [agentless servers](../advanced.md#running-agentless-servers-experimental), in order to provide the apiserver with access to cluster service endpoints in the absence of flannel and kube-proxy.
+The default mode is `agent`. `pod` or `cluster` modes are recommended when running [agentless servers](../../advanced.md#running-agentless-servers-experimental), in order to provide the apiserver with access to cluster service endpoints in the absence of flannel and kube-proxy.
 
 The egress selector mode may be configured on servers via the `--egress-selector-mode` flag, and offers four modes:
 * `disabled`: The apiserver does not use agent tunnels to communicate with kubelets or cluster endpoints.
@@ -183,85 +183,3 @@ Single-stack IPv6 clusters (clusters without IPv4) are supported on K3s using th
 ```bash
 --cluster-cidr=2001:cafe:42::/56 --service-cidr=2001:cafe:43::/112
 ```
-
-## Distributed hybrid or multicloud cluster
-
-A K3s cluster can still be deployed on nodes which do not share a common private network and are not directly connected (e.g. nodes in different public clouds). There are two options to achieve this: the embedded k3s multicloud solution and the integration with the `tailscale` VPN provider.
-
-:::warning
-The latency between nodes will increase as external connectivity requires more hops. This will reduce the network performance and could also impact the health of the cluster if latency is too high.
-:::
-
-:::warning
-Embedded etcd is not supported in this type of deployment. If using embedded etcd, all server nodes must be reachable to each other via their private IPs. Agents may be distributed over multiple networks, but all servers should be in the same location.
-:::
-
-### Embedded k3s multicloud solution
-
-K3s uses wireguard to establish a VPN mesh for cluster traffic. Nodes must each have a unique IP through which they can be reached (usually a public IP). K3s supervisor traffic will use a websocket tunnel, and cluster (CNI) traffic will use a wireguard tunnel.
-
-To enable this type of deployment, you must add the following parameters on servers:
-```bash
---node-external-ip=<SERVER_EXTERNAL_IP> --flannel-backend=wireguard-native --flannel-external-ip
-```
-and on agents:
-```bash
---node-external-ip=<AGENT_EXTERNAL_IP>
-```
-
-where `SERVER_EXTERNAL_IP` is the IP through which we can reach the server node and `AGENT_EXTERNAL_IP` is the IP through which we can reach the agent node. Note that the `K3S_URL` config parameter in the agent should use the `SERVER_EXTERNAL_IP` to be able to connect to it. Remember to check the [Networking Requirements](../installation/requirements.md#networking) and allow access to the listed ports on both internal and external addresses.
- -Both `SERVER_EXTERNAL_IP` and `AGENT_EXTERNAL_IP` must have connectivity between them and are normally public IPs. - -:::info Dynamic IPs -If nodes are assigned dynamic IPs and the IP changes (e.g. in AWS), you must modify the `--node-external-ip` parameter to reflect the new IP. If running K3s as a service, you must modify `/etc/systemd/system/k3s.service` then run: - -```bash -systemctl daemon-reload -systemctl restart k3s -``` -::: - -### Integration with the Tailscale VPN provider (experimental) - -Available in v1.27.3, v1.26.6, v1.25.11 and newer. - -K3s can integrate with [Tailscale](https://tailscale.com/) so that nodes use the Tailscale VPN service to build a mesh between nodes. - -There are four steps to be done with Tailscale before deploying K3s: - -1. Log in to your Tailscale account - -2. In `Settings > Keys`, generate an auth key ($AUTH-KEY), which may be reusable for all nodes in your cluster - -3. Decide on the podCIDR the cluster will use (by default `10.42.0.0/16`). Append the CIDR (or CIDRs for dual-stack) in Access controls with the stanza: -```yaml -"autoApprovers": { - "routes": { - "10.42.0.0/16": ["your_account@xyz.com"], - "2001:cafe:42::/56": ["your_account@xyz.com"], - }, - }, -``` - -4. Install Tailscale in your nodes: -```bash -curl -fsSL https://tailscale.com/install.sh | sh -``` - -To deploy K3s with Tailscale integration enabled, you must add the following parameter on each of your nodes: -```bash ---vpn-auth="name=tailscale,joinKey=$AUTH-KEY -``` -or provide that information in a file and use the parameter: -```bash ---vpn-auth-file=$PATH_TO_FILE -``` - -Optionally, if you have your own Tailscale server (e.g. headscale), you can connect to it by appending `,controlServerURL=$URL` to the vpn-auth parameters - -:::warning - -If you plan on running several K3s clusters using the same tailscale network, please create appropriate [ACLs](https://tailscale.com/kb/1018/acls/) to avoid IP conflicts or use different podCIDR subnets for each cluster. - -::: diff --git a/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/distributed-multicloud.md b/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/distributed-multicloud.md new file mode 100644 index 000000000..ce9a06251 --- /dev/null +++ b/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/distributed-multicloud.md @@ -0,0 +1,84 @@ +--- +title: "Distributed hybrid or multicloud cluster" +weight: 25 +--- + +A K3s cluster can still be deployed on nodes which do not share a common private network and are not directly connected (e.g. nodes in different public clouds). There are two options to achieve this: the embedded k3s multicloud solution and the integration with the `tailscale` VPN provider. + +:::warning +The latency between nodes will increase as external connectivity requires more hops. This will reduce the network performance and could also impact the health of the cluster if latency is too high. +::: + +:::warning +Embedded etcd is not supported in this type of deployment. If using embedded etcd, all server nodes must be reachable to each other via their private IPs. Agents may be distributed over multiple networks, but all servers should be in the same location. +::: + +### Embedded k3s multicloud solution + +K3s uses wireguard to establish a VPN mesh for cluster traffic. Nodes must each have a unique IP through which they can be reached (usually a public IP). 
K3s supervisor traffic will use a websocket tunnel, and cluster (CNI) traffic will use a wireguard tunnel.
+
+To enable this type of deployment, you must add the following parameters on servers:
+```bash
+--node-external-ip=<SERVER_EXTERNAL_IP> --flannel-backend=wireguard-native --flannel-external-ip
+```
+and on agents:
+```bash
+--node-external-ip=<AGENT_EXTERNAL_IP>
+```
+
+where `SERVER_EXTERNAL_IP` is the IP through which we can reach the server node and `AGENT_EXTERNAL_IP` is the IP through which we can reach the agent node. Note that the `K3S_URL` config parameter in the agent should use the `SERVER_EXTERNAL_IP` to be able to connect to it. Remember to check the [Networking Requirements](../../installation/requirements.md#networking) and allow access to the listed ports on both internal and external addresses.
+
+Both `SERVER_EXTERNAL_IP` and `AGENT_EXTERNAL_IP` must have connectivity between them and are normally public IPs.
+
+:::info Dynamic IPs
+If nodes are assigned dynamic IPs and the IP changes (e.g. in AWS), you must modify the `--node-external-ip` parameter to reflect the new IP. If running K3s as a service, you must modify `/etc/systemd/system/k3s.service` then run:
+
+```bash
+systemctl daemon-reload
+systemctl restart k3s
+```
+:::
+
+### Integration with the Tailscale VPN provider (experimental)
+
+Available in v1.27.3, v1.26.6, v1.25.11 and newer.
+
+K3s can integrate with [Tailscale](https://tailscale.com/) so that nodes use the Tailscale VPN service to build a mesh between nodes.
+
+There are four steps to be done with Tailscale before deploying K3s:
+
+1. Log in to your Tailscale account
+
+2. In `Settings > Keys`, generate an auth key ($AUTH-KEY), which may be reusable for all nodes in your cluster
+
+3. Decide on the podCIDR the cluster will use (by default `10.42.0.0/16`). Append the CIDR (or CIDRs for dual-stack) in Access controls with the stanza:
+```yaml
+"autoApprovers": {
+    "routes": {
+      "10.42.0.0/16": ["your_account@xyz.com"],
+      "2001:cafe:42::/56": ["your_account@xyz.com"],
+    },
+  },
+```
+
+4. Install Tailscale on your nodes:
+```bash
+curl -fsSL https://tailscale.com/install.sh | sh
+```
+
+To deploy K3s with Tailscale integration enabled, you must add the following parameter on each of your nodes:
+```bash
+--vpn-auth="name=tailscale,joinKey=$AUTH-KEY"
+```
+or provide that information in a file and use the parameter:
+```bash
+--vpn-auth-file=$PATH_TO_FILE
+```
+
+Optionally, if you have your own Tailscale server (e.g. headscale), you can connect to it by appending `,controlServerURL=$URL` to the vpn-auth parameters.
+
+:::warning
+
+If you plan on running several K3s clusters using the same tailscale network, please create appropriate [ACLs](https://tailscale.com/kb/1018/acls/) to avoid IP conflicts or use different podCIDR subnets for each cluster.
+
+:::
diff --git a/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/multus-ipams.md b/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/multus-ipams.md
new file mode 100644
index 000000000..86428bba2
--- /dev/null
+++ b/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/multus-ipams.md
@@ -0,0 +1,75 @@
+---
+title: "Multus and IPAM plugins"
+weight: 25
+---
+
+[Multus CNI](https://github.com/k8snetworkplumbingwg/multus-cni) is a CNI plugin that enables attaching multiple network interfaces to pods. Multus does not replace CNI plugins; instead, it acts as a CNI plugin multiplexer.
Multus is useful in certain use cases, especially when pods are network intensive and require extra network interfaces that support dataplane acceleration techniques such as SR-IOV.
+
+Multus cannot be deployed standalone. It always requires at least one conventional CNI plugin that fulfills the Kubernetes cluster network requirements. That CNI plugin becomes the default for Multus, and will be used to provide the primary interface for all pods. When deploying K3s with default options, that CNI plugin is Flannel.
+
+To deploy Multus, we recommend using the following helm repo:
+```
+helm repo add rke2-charts https://rke2-charts.rancher.io
+helm repo update
+```
+
+Then, create a config file that sets the necessary configuration for Multus to work. The configuration depends on the IPAM plugin to be used, i.e. how the extra Multus interfaces of your pods will get their IPs. There are three options: host-local, DHCP Daemon and whereabouts:
+
+
+
+The host-local IPAM plugin allocates IP addresses out of a set of address ranges. It stores the state locally on the host filesystem, hence ensuring uniqueness of IP addresses on a single host. Therefore, we don't recommend it for multi-node clusters. This IPAM plugin does not require any extra deployment. For more information: https://www.cni.dev/plugins/current/ipam/host-local/.
+
+To use the host-local plugin, please create a file called `multus-values.yaml` with the following content:
+```
+config:
+  cni_conf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+    kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig
+```
+
+
+
+[Whereabouts](https://github.com/k8snetworkplumbingwg/whereabouts) is an IP Address Management (IPAM) CNI plugin that assigns IP addresses cluster-wide.
+
+To use the Whereabouts IPAM plugin, please create a file called `multus-values.yaml` with the following content:
+```
+config:
+  cni_conf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+    kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig
+rke2-whereabouts:
+  fullnameOverride: whereabouts
+  enabled: true
+  cniConf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+```
+
+
+
+The dhcp IPAM plugin can be deployed when there is already a DHCP server running on the network. In this case, a DHCP daemonset is deployed to take care of periodically renewing the DHCP lease. For more information, please check the official docs of the [DHCP IPAM plugin](https://www.cni.dev/plugins/current/ipam/dhcp/).
+
+To use this DHCP plugin, please create a file called `multus-values.yaml` with the following content:
+```
+config:
+  cni_conf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+    kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig
+manifests:
+  dhcpDaemonSet: true
+```
+
+
+
+
+After creating the `multus-values.yaml` file, everything is ready to install Multus:
+```
+helm install multus rke2-charts/rke2-multus -n kube-system --kubeconfig /etc/rancher/k3s/k3s.yaml --values multus-values.yaml
+```
+
+That will create a daemonset called `multus` which will deploy Multus and all regular CNI binaries in `/var/lib/rancher/k3s/data/current/` (e.g. macvlan) and the correct Multus config in `/var/lib/rancher/k3s/agent/etc/cni/net.d`.
+
+For more information about Multus, refer to the [multus-cni](https://github.com/k8snetworkplumbingwg/multus-cni/tree/master/docs) documentation.
diff --git a/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/network-options.md b/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/network-options.md
new file mode 100644
index 000000000..a478dc330
--- /dev/null
+++ b/i18n/kr/docusaurus-plugin-content-docs/current/installation/network-options/network-options.md
@@ -0,0 +1,12 @@
+---
+title: "Network options"
+weight: 20
+---
+
+This section contains instructions for configuring networking in K3s.
+
+[Basic Network Options](basic-network-options.md) covers the basic networking configuration of the cluster, such as flannel and single/dual-stack configurations.
+
+[Hybrid/Multicloud cluster](distributed-multicloud.md) provides guidance on the options available to span the K3s cluster over remote or hybrid nodes.
+
+[Multus and IPAM plugins](multus-ipams.md) provides guidance on leveraging Multus in K3s in order to have multiple interfaces per pod.
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/installation/installation.md b/i18n/zh/docusaurus-plugin-content-docs/current/installation/installation.md
index 279b5266e..51caedf18 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/installation/installation.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/installation/installation.md
@@ -7,7 +7,7 @@ This section contains instructions for installing K3s in various environments. P
 
 [Configuration Options](configuration.md) provides guidance on the options available to you when installing K3s.
 
-[Network Options](network-options.md) provides guidance on the networking options available in k3s.
+[Network Options](network-options/network-options.md) provides guidance on the networking options available in k3s.
 
 [Private Registry Configuration](private-registry.md) covers use of `registries.yaml` to configure container image registry mirrors.
 
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/basic-network-options.md b/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/basic-network-options.md
new file mode 100644
index 000000000..a14fedcde
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/basic-network-options.md
@@ -0,0 +1,185 @@
+---
+title: "Basic Network Options"
+weight: 25
+---
+
+This page describes K3s network configuration options, including configuration or replacement of Flannel, and configuring IPv6.
+
+> **Note:** Please reference the [Networking](../../networking.md) page for information about CoreDNS, Traefik, and the Service LB.
+
+## Flannel Options
+
+[Flannel](https://github.com/flannel-io/flannel/blob/master/README.md) is a lightweight provider of layer 3 network fabric that implements the Kubernetes Container Network Interface (CNI). It is what is commonly referred to as a CNI Plugin.
+
+* Flannel options can only be set on server nodes, and must be identical on all servers in the cluster.
+* The default backend for Flannel is `vxlan`. To enable encryption, use the `wireguard-native` backend.
+* Using `vxlan` on Raspberry Pi with recent versions of Ubuntu requires [additional preparation](../requirements.md?os=pi#operating-systems).
+* Using `wireguard-native` as the Flannel backend may require additional modules on some Linux distributions. Please see the [WireGuard Install Guide](https://www.wireguard.com/install/) for details.
+  The WireGuard install steps will ensure the appropriate kernel modules are installed for your operating system.
+  You must ensure that WireGuard kernel modules are available on every node, both servers and agents, before attempting to use the WireGuard Flannel backend.
+
+
+| CLI Flag and Value | Description |
+|--------------------|-------------|
+| `--flannel-ipv6-masq` | Apply masquerading rules to IPv6 traffic (default for IPv4). Only applies on dual-stack or IPv6-only clusters. Compatible with any Flannel backend other than `none`. |
+| `--flannel-external-ip` | Use node external IP addresses as the destination for Flannel traffic, instead of internal IPs. Only applies when `--node-external-ip` is set on a node. |
+| `--flannel-backend=vxlan` | Use VXLAN to encapsulate the packets. May require additional kernel modules on Raspberry Pi. |
+| `--flannel-backend=host-gw` | Use IP routes to pod subnets via node IPs. Requires direct layer 2 connectivity between all nodes in the cluster. |
+| `--flannel-backend=wireguard-native` | Use WireGuard to encapsulate and encrypt network traffic. May require additional kernel modules. |
+| `--flannel-backend=ipsec` | Use strongSwan IPSec via the `swanctl` binary to encrypt network traffic. (Deprecated; will be removed in v1.27.0) |
+| `--flannel-backend=none` | Disable Flannel entirely. |
+
+:::info Version Gate
+
+K3s no longer includes strongSwan `swanctl` and `charon` binaries starting with the 2022-12 releases (v1.26.0+k3s1, v1.25.5+k3s1, v1.24.9+k3s1, v1.23.15+k3s1). Please install the correct packages on your node before upgrading to or installing these releases if you want to use the `ipsec` backend.
+
+:::
+
+### Migrating from `wireguard` or `ipsec` to `wireguard-native`
+
+The legacy `wireguard` backend requires installation of the `wg` tool on the host. This backend is not available in K3s v1.26 and higher, in favor of the `wireguard-native` backend, which directly interfaces with the kernel.
+
+The legacy `ipsec` backend requires installation of the `swanctl` and `charon` binaries on the host. This backend is not available in K3s v1.27 and higher, in favor of the `wireguard-native` backend.
+
+We recommend that users migrate to the new backend as soon as possible. The migration requires a short period of downtime while nodes come up with the new configuration. You should follow these two steps:
+
+1. Update the K3s config on all server nodes. If using config files, the `/etc/rancher/k3s/config.yaml` should include `flannel-backend: wireguard-native` instead of `flannel-backend: wireguard` or `flannel-backend: ipsec`. If you are configuring K3s via CLI flags in the systemd unit, the equivalent flags should be changed.
+2. Reboot all nodes, starting with the servers.
+
+## Custom CNI
+
+Start K3s with `--flannel-backend=none` and install your CNI of choice. Most CNI plugins come with their own network policy engine, so it is recommended to set `--disable-network-policy` as well to avoid conflicts. Some important information to take into consideration:
+
+
+
+
+Visit the [Canal Docs](https://docs.tigera.io/calico/latest/getting-started/kubernetes/flannel/install-for-flannel#installing-calico-for-policy-and-flannel-aka-canal-for-networking) website. Follow the steps to install Canal.
Modify the Canal YAML so that IP forwarding is allowed in the `container_settings` section, for example:
+
+```yaml
+"container_settings": {
+  "allow_ip_forwarding": true
+}
+```
+
+Apply the Canal YAML.
+
+Ensure the settings were applied by running the following command on the host:
+
+```bash
+cat /etc/cni/net.d/10-canal.conflist
+```
+
+You should see that IP forwarding is set to true.
+
+
+
+
+Follow the [Calico CNI Plugins Guide](https://docs.tigera.io/calico/latest/reference/configure-cni-plugins). Modify the Calico YAML so that IP forwarding is allowed in the `container_settings` section, for example:
+
+```yaml
+"container_settings": {
+  "allow_ip_forwarding": true
+}
+```
+
+Apply the Calico YAML.
+
+Ensure the settings were applied by running the following command on the host:
+
+```bash
+cat /etc/cni/net.d/10-calico.conflist
+```
+
+You should see that IP forwarding is set to true.
+
+
+
+
+
+Before running `k3s-killall.sh` or `k3s-uninstall.sh`, you must manually remove the `cilium_host`, `cilium_net` and `cilium_vxlan` interfaces. If you fail to do this, you may lose network connectivity to the host when K3s is stopped.
+
+```bash
+ip link delete cilium_host
+ip link delete cilium_net
+ip link delete cilium_vxlan
+```
+
+Additionally, iptables rules for cilium should be removed:
+
+```bash
+iptables-save | grep -iv cilium | iptables-restore
+ip6tables-save | grep -iv cilium | ip6tables-restore
+```
+
+
+
+
+## Control-Plane Egress Selector configuration
+
+K3s agents and servers maintain websocket tunnels between nodes that are used to encapsulate bidirectional communication between the control-plane (apiserver) and agent (kubelet and containerd) components.
+This allows agents to operate without exposing the kubelet and container runtime streaming ports to incoming connections, and for the control-plane to connect to cluster services when operating with the agent disabled.
+This functionality is equivalent to the [Konnectivity](https://kubernetes.io/docs/tasks/extend-kubernetes/setup-konnectivity/) service commonly used on other Kubernetes distributions, and is managed via the apiserver's egress selector configuration.
+
+The default mode is `agent`. `pod` or `cluster` modes are recommended when running [agentless servers](../../advanced.md#running-agentless-servers-experimental), in order to provide the apiserver with access to cluster service endpoints in the absence of flannel and kube-proxy.
+
+The egress selector mode may be configured on servers via the `--egress-selector-mode` flag, and offers four modes:
+* `disabled`: The apiserver does not use agent tunnels to communicate with kubelets or cluster endpoints.
+  This mode requires that servers run the kubelet, CNI, and kube-proxy, and have direct connectivity to agents, or the apiserver will not be able to access service endpoints or perform `kubectl exec` and `kubectl logs`.
+* `agent` (default): The apiserver uses agent tunnels to communicate with kubelets.
+  This mode requires that the servers also run the kubelet, CNI, and kube-proxy, or the apiserver will not be able to access service endpoints.
+* `pod`: The apiserver uses agent tunnels to communicate with kubelets and service endpoints, routing endpoint connections to the correct agent by watching Nodes and Endpoints.
+  **NOTE**: This mode will not work when using a CNI that uses its own IPAM and does not respect the node's PodCIDR allocation. `cluster` or `agent` mode should be used with these CNIs instead.
+* `cluster`: The apiserver uses agent tunnels to communicate with kubelets and service endpoints, routing endpoint connections to the correct agent by watching Pods and Endpoints. This mode has the highest portability across different cluster configurations, at the cost of increased overhead.
+
+## Dual-stack (IPv4 + IPv6) Networking
+
+:::info Version Gate
+
+Experimental support is available as of [v1.21.0+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.21.0%2Bk3s1).
+Stable support is available as of [v1.23.7+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.23.7%2Bk3s1).
+
+:::
+
+:::warning Known Issue
+
+Before 1.27, Kubernetes [Issue #111695](https://github.com/kubernetes/kubernetes/issues/111695) causes the Kubelet to ignore the node IPv6 addresses if you have a dual-stack environment and you are not using the primary network interface for cluster traffic. To avoid this bug, use 1.27 or newer or add the following flag to both K3s servers and agents:
+
+```
+--kubelet-arg="node-ip=0.0.0.0" # To prioritize IPv4 traffic
+#OR
+--kubelet-arg="node-ip=::" # To prioritize IPv6 traffic
+```
+
+:::
+
+Dual-stack networking must be configured when the cluster is first created. It cannot be enabled on an existing cluster once it has been started as IPv4-only.
+
+To enable dual-stack in K3s, you must provide valid dual-stack `cluster-cidr` and `service-cidr` on all server nodes. This is an example of a valid configuration:
+
+```
+--cluster-cidr=10.42.0.0/16,2001:cafe:42::/56 --service-cidr=10.43.0.0/16,2001:cafe:43::/112
+```
+
+Note that you may configure any valid `cluster-cidr` and `service-cidr` values, but the above masks are recommended. If you change the `cluster-cidr` mask, you should also change the `node-cidr-mask-size-ipv4` and `node-cidr-mask-size-ipv6` values to match the planned pods per node and total node count. The largest supported `service-cidr` mask is /12 for IPv4, and /112 for IPv6. Remember to allow IPv6 traffic if you are deploying in a public cloud.
+
+If you are using a custom CNI plugin, i.e. a CNI plugin other than Flannel, additional configuration may be required. Please consult your plugin's dual-stack documentation and verify if network policies can be enabled.
+
+:::warning Known Issue
+When defining `cluster-cidr` and `service-cidr` with IPv6 as the primary family, the `node-ip` of all cluster members should be explicitly set, placing the node's desired IPv6 address as the first address. By default, the kubelet always uses IPv4 as the primary address family.
+:::
+
+## Single-stack IPv6 Networking
+
+:::info Version Gate
+Available as of [v1.22.9+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.22.9%2Bk3s1)
+:::
+
+:::warning Known Issue
+If your IPv6 default route is set by a router advertisement (RA), you will need to set the sysctl `net.ipv6.conf.all.accept_ra=2`; otherwise, the node will drop the default route once it expires. Be aware that accepting RAs could increase the risk of [man-in-the-middle attacks](https://github.com/kubernetes/kubernetes/issues/91507).
+:::
+
+Single-stack IPv6 clusters (clusters without IPv4) are supported on K3s using the `--cluster-cidr` and `--service-cidr` flags.
This is an example of a valid configuration:
+
+```bash
+--cluster-cidr=2001:cafe:42::/56 --service-cidr=2001:cafe:43::/112
+```
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/distributed-multicloud.md b/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/distributed-multicloud.md
new file mode 100644
index 000000000..ce9a06251
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/distributed-multicloud.md
@@ -0,0 +1,84 @@
+---
+title: "Distributed hybrid or multicloud cluster"
+weight: 25
+---
+
+A K3s cluster can still be deployed on nodes which do not share a common private network and are not directly connected (e.g. nodes in different public clouds). There are two options to achieve this: the embedded k3s multicloud solution and the integration with the `tailscale` VPN provider.
+
+:::warning
+The latency between nodes will increase as external connectivity requires more hops. This will reduce the network performance and could also impact the health of the cluster if latency is too high.
+:::
+
+:::warning
+Embedded etcd is not supported in this type of deployment. If using embedded etcd, all server nodes must be reachable to each other via their private IPs. Agents may be distributed over multiple networks, but all servers should be in the same location.
+:::
+
+### Embedded k3s multicloud solution
+
+K3s uses wireguard to establish a VPN mesh for cluster traffic. Nodes must each have a unique IP through which they can be reached (usually a public IP). K3s supervisor traffic will use a websocket tunnel, and cluster (CNI) traffic will use a wireguard tunnel.
+
+To enable this type of deployment, you must add the following parameters on servers:
+```bash
+--node-external-ip=<SERVER_EXTERNAL_IP> --flannel-backend=wireguard-native --flannel-external-ip
+```
+and on agents:
+```bash
+--node-external-ip=<AGENT_EXTERNAL_IP>
+```
+
+where `SERVER_EXTERNAL_IP` is the IP through which we can reach the server node and `AGENT_EXTERNAL_IP` is the IP through which we can reach the agent node. Note that the `K3S_URL` config parameter in the agent should use the `SERVER_EXTERNAL_IP` to be able to connect to it. Remember to check the [Networking Requirements](../../installation/requirements.md#networking) and allow access to the listed ports on both internal and external addresses.
+
+Both `SERVER_EXTERNAL_IP` and `AGENT_EXTERNAL_IP` must have connectivity between them and are normally public IPs.
+
+:::info Dynamic IPs
+If nodes are assigned dynamic IPs and the IP changes (e.g. in AWS), you must modify the `--node-external-ip` parameter to reflect the new IP. If running K3s as a service, you must modify `/etc/systemd/system/k3s.service` then run:
+
+```bash
+systemctl daemon-reload
+systemctl restart k3s
+```
+:::
+
+### Integration with the Tailscale VPN provider (experimental)
+
+Available in v1.27.3, v1.26.6, v1.25.11 and newer.
+
+K3s can integrate with [Tailscale](https://tailscale.com/) so that nodes use the Tailscale VPN service to build a mesh between nodes.
+
+There are four steps to be done with Tailscale before deploying K3s:
+
+1. Log in to your Tailscale account
+
+2. In `Settings > Keys`, generate an auth key ($AUTH-KEY), which may be reusable for all nodes in your cluster
+
+3. Decide on the podCIDR the cluster will use (by default `10.42.0.0/16`).
Append the CIDR (or CIDRs for dual-stack) in Access controls with the stanza:
+```yaml
+"autoApprovers": {
+    "routes": {
+      "10.42.0.0/16": ["your_account@xyz.com"],
+      "2001:cafe:42::/56": ["your_account@xyz.com"],
+    },
+  },
+```
+
+4. Install Tailscale on your nodes:
+```bash
+curl -fsSL https://tailscale.com/install.sh | sh
+```
+
+To deploy K3s with Tailscale integration enabled, you must add the following parameter on each of your nodes:
+```bash
+--vpn-auth="name=tailscale,joinKey=$AUTH-KEY"
+```
+or provide that information in a file and use the parameter:
+```bash
+--vpn-auth-file=$PATH_TO_FILE
+```
+
+Optionally, if you have your own Tailscale server (e.g. headscale), you can connect to it by appending `,controlServerURL=$URL` to the vpn-auth parameters.
+
+:::warning
+
+If you plan on running several K3s clusters using the same tailscale network, please create appropriate [ACLs](https://tailscale.com/kb/1018/acls/) to avoid IP conflicts or use different podCIDR subnets for each cluster.
+
+:::
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/multus-ipams.md b/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/multus-ipams.md
new file mode 100644
index 000000000..86428bba2
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/multus-ipams.md
@@ -0,0 +1,75 @@
+---
+title: "Multus and IPAM plugins"
+weight: 25
+---
+
+[Multus CNI](https://github.com/k8snetworkplumbingwg/multus-cni) is a CNI plugin that enables attaching multiple network interfaces to pods. Multus does not replace CNI plugins; instead, it acts as a CNI plugin multiplexer. Multus is useful in certain use cases, especially when pods are network intensive and require extra network interfaces that support dataplane acceleration techniques such as SR-IOV.
+
+Multus cannot be deployed standalone. It always requires at least one conventional CNI plugin that fulfills the Kubernetes cluster network requirements. That CNI plugin becomes the default for Multus, and will be used to provide the primary interface for all pods. When deploying K3s with default options, that CNI plugin is Flannel.
+
+To deploy Multus, we recommend using the following helm repo:
+```
+helm repo add rke2-charts https://rke2-charts.rancher.io
+helm repo update
+```
+
+Then, create a config file that sets the necessary configuration for Multus to work. The configuration depends on the IPAM plugin to be used, i.e. how the extra Multus interfaces of your pods will get their IPs. There are three options: host-local, DHCP Daemon and whereabouts:
+
+
+
+The host-local IPAM plugin allocates IP addresses out of a set of address ranges. It stores the state locally on the host filesystem, hence ensuring uniqueness of IP addresses on a single host. Therefore, we don't recommend it for multi-node clusters. This IPAM plugin does not require any extra deployment. For more information: https://www.cni.dev/plugins/current/ipam/host-local/.
+
+To use the host-local plugin, please create a file called `multus-values.yaml` with the following content:
+```
+config:
+  cni_conf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+    kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig
+```
+
+
+
+[Whereabouts](https://github.com/k8snetworkplumbingwg/whereabouts) is an IP Address Management (IPAM) CNI plugin that assigns IP addresses cluster-wide.
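+
+For context, the cluster-wide allocation is what distinguishes Whereabouts from host-local: the `ipam` section of a `NetworkAttachmentDefinition` simply references it with a shared range. A minimal sketch, where the attachment name, `eth0` master interface, and addresses are illustrative assumptions rather than values required by K3s:
+```yaml
+apiVersion: "k8s.cni.cncf.io/v1"
+kind: NetworkAttachmentDefinition
+metadata:
+  name: macvlan-whereabouts      # hypothetical attachment name
+spec:
+  config: '{
+      "cniVersion": "0.3.1",
+      "type": "macvlan",
+      "master": "eth0",
+      "mode": "bridge",
+      "ipam": {
+        "type": "whereabouts",
+        "range": "192.168.2.225/28",
+        "exclude": ["192.168.2.229/30"]
+      }
+    }'
+```
+Pods on any node that reference this attachment receive non-conflicting IPs from the shared range.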
+
+To use the Whereabouts IPAM plugin, please create a file called `multus-values.yaml` with the following content:
+```
+config:
+  cni_conf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+    kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig
+rke2-whereabouts:
+  fullnameOverride: whereabouts
+  enabled: true
+  cniConf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+```
+
+
+
+The dhcp IPAM plugin can be deployed when there is already a DHCP server running on the network. In this case, a DHCP daemonset is deployed to take care of periodically renewing the DHCP lease. For more information, please check the official docs of the [DHCP IPAM plugin](https://www.cni.dev/plugins/current/ipam/dhcp/).
+
+To use this DHCP plugin, please create a file called `multus-values.yaml` with the following content:
+```
+config:
+  cni_conf:
+    confDir: /var/lib/rancher/k3s/agent/etc/cni/net.d
+    binDir: /var/lib/rancher/k3s/data/current/bin/
+    kubeconfig: /var/lib/rancher/k3s/agent/etc/cni/net.d/multus.d/multus.kubeconfig
+manifests:
+  dhcpDaemonSet: true
+```
+
+
+
+
+After creating the `multus-values.yaml` file, everything is ready to install Multus:
+```
+helm install multus rke2-charts/rke2-multus -n kube-system --kubeconfig /etc/rancher/k3s/k3s.yaml --values multus-values.yaml
+```
+
+That will create a daemonset called `multus` which will deploy Multus and all regular CNI binaries in `/var/lib/rancher/k3s/data/current/` (e.g. macvlan) and the correct Multus config in `/var/lib/rancher/k3s/agent/etc/cni/net.d`.
+
+For more information about Multus, refer to the [multus-cni](https://github.com/k8snetworkplumbingwg/multus-cni/tree/master/docs) documentation.
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/network-options.md b/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/network-options.md
new file mode 100644
index 000000000..a478dc330
--- /dev/null
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/installation/network-options/network-options.md
@@ -0,0 +1,12 @@
+---
+title: "Network options"
+weight: 20
+---
+
+This section contains instructions for configuring networking in K3s.
+
+[Basic Network Options](basic-network-options.md) covers the basic networking configuration of the cluster, such as flannel and single/dual-stack configurations.
+
+[Hybrid/Multicloud cluster](distributed-multicloud.md) provides guidance on the options available to span the K3s cluster over remote or hybrid nodes.
+
+[Multus and IPAM plugins](multus-ipams.md) provides guidance on leveraging Multus in K3s in order to have multiple interfaces per pod.
diff --git a/i18n/zh/docusaurus-plugin-content-docs/current/networking.md b/i18n/zh/docusaurus-plugin-content-docs/current/networking.md
index 290b35d65..005da42ba 100644
--- a/i18n/zh/docusaurus-plugin-content-docs/current/networking.md
+++ b/i18n/zh/docusaurus-plugin-content-docs/current/networking.md
@@ -5,7 +5,7 @@ weight: 35
 
 本文介绍了 CoreDNS、Traefik Ingress controller 和 Klipper service load balancer 是如何在 K3s 中工作的。
 
-有关 Flannel 配置选项和后端选择,以及如何设置自己的 CNI,请参阅[安装网络选项](./installation/network-options.md)页面。
+有关 Flannel 配置选项和后端选择,以及如何设置自己的 CNI,请参阅[安装网络选项](./installation/network-options/network-options.md)页面。
 
 有关 K3s 需要开放哪些端口,请参考[网络要求](./installation/requirements.md#网络)。
 
diff --git a/package.json b/package.json
index d1898dbfd..3ab38a971 100644
--- a/package.json
+++ b/package.json
@@ -17,7 +17,7 @@
     "@docusaurus/core": "^3.1.1",
     "@docusaurus/plugin-client-redirects": "^3.1.1",
     "@docusaurus/preset-classic": "^3.1.1",
-    "@docusaurus/theme-common": "^3.0.1",
+    "@docusaurus/theme-common": "^3.1.1",
     "@docusaurus/theme-mermaid": "^3.1.1",
     "@easyops-cn/docusaurus-search-local": "^0.40.1",
     "@mdx-js/react": "3.0.1",
@@ -32,7 +32,7 @@
     "remark-validate-links-heading-id": "^0.0.3"
   },
   "devDependencies": {
-    "@docusaurus/module-type-aliases": "^3.0.1"
+    "@docusaurus/module-type-aliases": "^3.1.1"
   },
   "browserslist": {
     "production": [
diff --git a/sidebars.js b/sidebars.js
index 12159e798..2431c24de 100644
--- a/sidebars.js
+++ b/sidebars.js
@@ -9,7 +9,16 @@ module.exports = {
       items:[
         'installation/requirements',
         'installation/configuration',
-        'installation/network-options',
+        {
+          type: 'category',
+          label: 'Network Options',
+          link: { type: 'doc', id: 'installation/network-options/network-options'},
+          items: [
+            'installation/network-options/basic-network-options',
+            'installation/network-options/distributed-multicloud',
+            'installation/network-options/multus-ipams',
+          ],
+        },
         'installation/private-registry',
         'installation/registry-mirror',
         'installation/airgap',
diff --git a/yarn.lock b/yarn.lock
index d5022825e..3be832c13 100644
--- a/yarn.lock
+++ b/yarn.lock
@@ -1384,7 +1384,7 @@
     vfile "^6.0.1"
     webpack "^5.88.1"
 
-"@docusaurus/module-type-aliases@3.1.1", "@docusaurus/module-type-aliases@^3.0.1":
+"@docusaurus/module-type-aliases@3.1.1", "@docusaurus/module-type-aliases@^3.1.1":
   version "3.1.1"
   resolved "https://registry.yarnpkg.com/@docusaurus/module-type-aliases/-/module-type-aliases-3.1.1.tgz#b304402b0535a13ebd4c0db1c368d2604d54d02f"
   integrity sha512-xBJyx0TMfAfVZ9ZeIOb1awdXgR4YJMocIEzTps91rq+hJDFJgJaylDtmoRhUxkwuYmNK1GJpW95b7DLztSBJ3A==
@@ -1587,7 +1587,7 @@
     tslib "^2.6.0"
     utility-types "^3.10.0"
 
-"@docusaurus/theme-common@3.1.1", "@docusaurus/theme-common@^3.0.1":
+"@docusaurus/theme-common@3.1.1", "@docusaurus/theme-common@^3.1.1":
   version "3.1.1"
   resolved "https://registry.yarnpkg.com/@docusaurus/theme-common/-/theme-common-3.1.1.tgz#5a16893928b8379c9e83aef01d753e7e142459e2"
   integrity sha512-38urZfeMhN70YaXkwIGXmcUcv2CEYK/2l4b05GkJPrbEbgpsIZM3Xc+Js2ehBGGZmfZq8GjjQ5RNQYG+MYzCYg==