Added an example of how to deploy all the edge components
e-minguez committed Mar 22, 2024
1 parent 1b82229 commit 338e559
Showing 19 changed files with 579 additions and 0 deletions.
117 changes: 117 additions & 0 deletions examples/edge-stack-iso/custom/files/basic-setup.sh
@@ -0,0 +1,117 @@
#!/bin/bash
# Prerequisites: the cluster must already be running
export RKE2KUBECTL="/var/lib/rancher/rke2/bin/kubectl"
export K3SKUBECTL="/opt/bin/kubectl"
export RKE2KUBECONFIG="/etc/rancher/rke2/rke2.yaml"
export K3SKUBECONFIG="/etc/rancher/k3s/k3s.yaml"

export SUSECAFILE="/usr/share/pki/trust/anchors/SUSE_Trust_Root.crt.pem"
export SUSECACM="suse-internal-ca"
export SUSECACMNAMESPACE="kube-system"

########################
# METAL3 CHART DETAILS #
########################
export METAL3_CHART_NAME="metal3"
export METAL3_CHART_VERSION="0.6.3"
export METAL3_CHART_VALUESFILE="metal3.yaml"
export METAL3_CHART_CREATENAMESPACE="True"
export METAL3_CHART_INSTALLATIONNAMESPACE="kube-system"
export METAL3_CHART_TARGETNAMESPACE="metal3-system"

###########################
# METAL3 CHART REPOSITORY #
###########################
export METAL3_CHART_REPOSITORY_NAME="suse-edge"
export METAL3_CHART_REPOSITORY_URL="https://suse-edge.github.io/charts"
export METAL3_CHART_REPOSITORY_CAFILE=""
export METAL3_CHART_REPOSITORY_PLAINHTTP="False"
export METAL3_CHART_REPOSITORY_SKIPTLSVERIFY="False"
export METAL3_CHART_REPOSITORY_USERNAME=""
export METAL3_CHART_REPOSITORY_PASSWORD=""

###############
# METAL3 CAPI #
###############
export METAL3_CLUSTERCTLVERSION="1.6.2"
export METAL3_CAPICOREVERSION="1.6.0"
export METAL3_CAPIMETAL3VERSION="1.6.0"
export METAL3_CAPIRKE2VERSION="0.2.6"
export METAL3_CAPIPROVIDER="rke2"
export METAL3_CAPISYSTEMNAMESPACE="capi-system"
export METAL3_RKE2BOOTSTRAPNAMESPACE="rke2-bootstrap-system"
export METAL3_CAPM3NAMESPACE="capm3-system"
export METAL3_RKE2CONTROLPLANENAMESPACE="rke2-control-plane-system"

###########
# METALLB #
###########
export METALLBNAMESPACE="metallb-system"

###########
# RANCHER #
###########
export RANCHER_CHART_NAME="rancher"
export RANCHER_CHART_VERSION="2.8.2"
export RANCHER_CHART_VALUESFILE="rancher.yaml"
export RANCHER_CHART_CREATENAMESPACE="True"
export RANCHER_CHART_INSTALLATIONNAMESPACE="kube-system"
export RANCHER_CHART_TARGETNAMESPACE="cattle-system"

export RANCHER_FINALPASSWORD="adminadminadmin"

############################
# RANCHER CHART REPOSITORY #
############################
export RANCHER_CHART_REPOSITORY_NAME="rancher-stable"
export RANCHER_CHART_REPOSITORY_URL="https://releases.rancher.com/server-charts/stable"
export RANCHER_CHART_REPOSITORY_CAFILE=""
export RANCHER_CHART_REPOSITORY_PLAINHTTP="False"
export RANCHER_CHART_REPOSITORY_SKIPTLSVERIFY="False"
export RANCHER_CHART_REPOSITORY_USERNAME=""
export RANCHER_CHART_REPOSITORY_PASSWORD=""

die(){
  echo "${1}" 1>&2
  exit "${2}"
}

setup_kubetools(){
  RETRIES=10
  SLEEPTIME=2

  # Identify whether K3s or RKE2 is running (timeout = retries * sleep time)
  t=${RETRIES}
  until [ -e "${RKE2KUBECONFIG}" ] || [ -e "${K3SKUBECONFIG}" ] || (( t-- <= 0 )); do
    sleep ${SLEEPTIME}
  done
  if [ -e "${RKE2KUBECONFIG}" ]; then
    export KUBECONFIG=${RKE2KUBECONFIG}
    export KUBECTL=${RKE2KUBECTL}
  elif [ -e "${K3SKUBECONFIG}" ]; then
    export KUBECONFIG=${K3SKUBECONFIG}
    export KUBECTL=${K3SKUBECTL}
  else
    die "Neither the RKE2 nor the K3s kubeconfig was found" 1
  fi

  # Wait for the node to be available, meaning the K8s API is available
  while ! ${KUBECTL} wait --for condition=ready node "$(tr '[:upper:]' '[:lower:]' < /etc/hostname)" ; do sleep 2 ; done

  # https://github.com/rancher/rke2/issues/3958
  if [ "${KUBECTL}" == "${RKE2KUBECTL}" ]; then
    # Wait for the rke2-ingress-nginx-controller DS to be available if using RKE2
    while ! ${KUBECTL} rollout status daemonset -n kube-system rke2-ingress-nginx-controller --timeout=60s; do sleep 2 ; done
  fi
}

setup_suse_internal_ca(){
  # Check if the CA configmap is already available
  if [ $(${KUBECTL} get configmap -n ${SUSECACMNAMESPACE} ${SUSECACM} -o name | wc -l) -lt 1 ]; then
    if [ -f "${SUSECAFILE}" ]; then
      # Create the configmap from the CA file
      ${KUBECTL} create cm ${SUSECACM} -n ${SUSECACMNAMESPACE} --from-file=ca.crt=${SUSECAFILE}
    fi
  fi
}

setup_kubetools
setup_suse_internal_ca
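
This helper is meant to be sourced rather than executed directly; a minimal sketch of how the later scripts in this commit consume it (nothing beyond what basic-setup.sh itself defines is assumed):

# Sourcing the helper also runs setup_kubetools and setup_suse_internal_ca,
# so KUBECTL and KUBECONFIG point at RKE2 or K3s afterwards
BASEDIR="$(dirname "$0")"
source ${BASEDIR}/basic-setup.sh
${KUBECTL} get nodes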
29 changes: 29 additions & 0 deletions examples/edge-stack-iso/custom/files/edge-stack-setup.service
@@ -0,0 +1,29 @@
[Unit]
Description=Setup Edge stack components
Wants=network-online.target
# It requires rke2 or k3s running, but it won't fail if those services are not present
After=network.target network-online.target rke2-server.service k3s.service
# At a minimum, basic-setup.sh needs to be present
ConditionPathExists=/opt/edge/bin/basic-setup.sh

[Service]
User=root
Type=forking
# Metal3 can take a very long time to download the IPA image
TimeoutStartSec=1800

ExecStartPre=/bin/sh -c "echo 'Starting to setup Edge components'"
# The scripts run in ExecStartPre because there can be more than one of them
ExecStartPre=/opt/edge/bin/rancher.sh
ExecStartPre=/opt/edge/bin/metal3.sh
ExecStart=/bin/sh -c "echo 'Finished'"
RemainAfterExit=yes
KillMode=process
# Disable & delete everything
ExecStartPost=rm -f /opt/edge/bin/rancher.sh
ExecStartPost=rm -f /opt/edge/bin/metal3.sh
ExecStartPost=/bin/sh -c "systemctl disable edge-stack-setup.service"
ExecStartPost=rm -f /etc/systemd/system/edge-stack-setup.service

[Install]
WantedBy=multi-user.target
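
A quick way to check the one-shot unit on a booted node (a minimal sketch, assuming only the unit name defined above and standard systemd tooling):

# Inspect the setup unit and its log output from the current boot
systemctl status edge-stack-setup.service
journalctl -u edge-stack-setup.service -b
# Note: after a successful run the unit disables and removes itself via
# ExecStartPost, so the journal is the main trace left behind.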
128 changes: 128 additions & 0 deletions examples/edge-stack-iso/custom/files/metal3.sh
@@ -0,0 +1,128 @@
#!/bin/bash
set -euo pipefail

BASEDIR="$(dirname "$0")"
source ${BASEDIR}/basic-setup.sh

METAL3LOCKNAMESPACE="default"
METAL3LOCKCMNAME="metal3-lock"

# Get or create the lock so these steps run on a single node only
# As the first node is created way before the others, this should be enough
# TODO: Investigate whether a Lease would be a better fit
if [ $(${KUBECTL} get cm -n ${METAL3LOCKNAMESPACE} ${METAL3LOCKCMNAME} -o name | wc -l) -lt 1 ]; then
  ${KUBECTL} create configmap ${METAL3LOCKCMNAME} -n ${METAL3LOCKNAMESPACE} --from-literal foo=bar
else
  exit 0
fi

# Wait for metal3
while ! ${KUBECTL} wait --for condition=ready -n ${METAL3_CHART_TARGETNAMESPACE} $(${KUBECTL} get pods -n ${METAL3_CHART_TARGETNAMESPACE} -l app.kubernetes.io/name=metal3-ironic -o name) --timeout=10s; do sleep 2 ; done

# Get the ironic IP
IRONICIP=$(${KUBECTL} get cm -n ${METAL3_CHART_TARGETNAMESPACE} ironic-bmo -o jsonpath='{.data.IRONIC_IP}')

# This only works for MetalLB; something equivalent would be needed for a NodePort-based setup

# Wait for metallb
while ! ${KUBECTL} wait --for condition=ready -n ${METALLBNAMESPACE} $(${KUBECTL} get pods -n ${METALLBNAMESPACE} -l app.kubernetes.io/component=controller -o name) --timeout=10s; do sleep 2 ; done

# Don't create the ippool if already created
${KUBECTL} get ipaddresspool -n ${METALLBNAMESPACE} ironic-ip-pool -o name || cat <<-EOF | ${KUBECTL} apply -f -
apiVersion: metallb.io/v1beta1
kind: IPAddressPool
metadata:
  name: ironic-ip-pool
  namespace: ${METALLBNAMESPACE}
spec:
  addresses:
  - ${IRONICIP}/32
  serviceAllocation:
    priority: 100
    serviceSelectors:
    - matchExpressions:
      - {key: app.kubernetes.io/name, operator: In, values: [metal3-ironic]}
EOF

# Same for L2 Advs
${KUBECTL} get L2Advertisement -n ${METALLBNAMESPACE} ironic-ip-pool-l2-adv -o name || cat <<-EOF | ${KUBECTL} apply -f -
apiVersion: metallb.io/v1beta1
kind: L2Advertisement
metadata:
  name: ironic-ip-pool-l2-adv
  namespace: ${METALLBNAMESPACE}
spec:
  ipAddressPools:
  - ironic-ip-pool
EOF

# If clusterctl is not installed, install it
if ! command -v clusterctl > /dev/null 2>&1; then
  LINUXARCH=$(uname -m)
  case ${LINUXARCH} in
    "x86_64")
      export GOARCH="amd64" ;;
    "aarch64")
      export GOARCH="arm64" ;;
    *)
      echo "Arch not detected, assuming amd64"
      export GOARCH="amd64" ;;
  esac

  # Clusterctl binary
  # Maybe just use the binary from hauler if available
  curl -L https://github.com/kubernetes-sigs/cluster-api/releases/download/v${METAL3_CLUSTERCTLVERSION}/clusterctl-linux-${GOARCH} -o /usr/local/bin/clusterctl
  chmod +x /usr/local/bin/clusterctl
fi

# If rancher is deployed
if [ $(${KUBECTL} get pods -n ${RANCHER_CHART_TARGETNAMESPACE} -l app=rancher -o name | wc -l) -ge 1 ]; then
cat <<-EOF | ${KUBECTL} apply -f -
apiVersion: management.cattle.io/v3
kind: Feature
metadata:
  name: embedded-cluster-api
spec:
  value: false
EOF

# Disable Rancher webhooks for CAPI
${KUBECTL} delete mutatingwebhookconfiguration.admissionregistration.k8s.io mutating-webhook-configuration
${KUBECTL} delete validatingwebhookconfigurations.admissionregistration.k8s.io validating-webhook-configuration
${KUBECTL} wait --for=delete namespace/cattle-provisioning-capi-system --timeout=300s
fi

# Deploy CAPI
if [ $(${KUBECTL} get pods -n ${METAL3_CAPISYSTEMNAMESPACE} -o name | wc -l) -lt 1 ]; then

# https://github.com/rancher-sandbox/cluster-api-provider-rke2#setting-up-clusterctl
mkdir -p ~/.cluster-api
cat <<-EOF > ~/.cluster-api/clusterctl.yaml
images:
  all:
    repository: registry.opensuse.org/isv/suse/edge/clusterapi/containerfile/suse
EOF

clusterctl init \
  --core "cluster-api:v${METAL3_CAPICOREVERSION}" \
  --infrastructure "metal3:v${METAL3_CAPIMETAL3VERSION}" \
  --bootstrap "${METAL3_CAPIPROVIDER}:v${METAL3_CAPIRKE2VERSION}" \
  --control-plane "${METAL3_CAPIPROVIDER}:v${METAL3_CAPIRKE2VERSION}"

# Wait for capi-controller-manager
while ! ${KUBECTL} wait --for condition=ready -n ${METAL3_CAPISYSTEMNAMESPACE} $(${KUBECTL} get pods -n ${METAL3_CAPISYSTEMNAMESPACE} -l cluster.x-k8s.io/provider=cluster-api -o name) --timeout=10s; do sleep 2 ; done

# Wait for capm3-controller-manager; there are two pods (the IPAM one and the capm3 one), just wait for the first
while ! ${KUBECTL} wait --for condition=ready -n ${METAL3_CAPM3NAMESPACE} $(${KUBECTL} get pods -n ${METAL3_CAPM3NAMESPACE} -l cluster.x-k8s.io/provider=infrastructure-metal3 -o name | head -n1 ) --timeout=10s; do sleep 2 ; done

# Wait for rke2-bootstrap-controller-manager
while ! ${KUBECTL} wait --for condition=ready -n ${METAL3_RKE2BOOTSTRAPNAMESPACE} $(${KUBECTL} get pods -n ${METAL3_RKE2BOOTSTRAPNAMESPACE} -l cluster.x-k8s.io/provider=bootstrap-rke2 -o name) --timeout=10s; do sleep 2 ; done

# Wait for rke2-control-plane-controller-manager
while ! ${KUBECTL} wait --for condition=ready -n ${METAL3_RKE2CONTROLPLANENAMESPACE} $(${KUBECTL} get pods -n ${METAL3_RKE2CONTROLPLANENAMESPACE} -l cluster.x-k8s.io/provider=control-plane-rke2 -o name) --timeout=10s; do sleep 2 ; done

fi

# Clean up the lock cm
${KUBECTL} delete configmap ${METAL3LOCKCMNAME} -n ${METAL3LOCKNAMESPACE}
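
To confirm that the components deployed by metal3.sh are healthy, a minimal check along these lines can be run afterwards (it assumes only the namespaces and MetalLB resources used in the script above, plus a kubectl on the PATH as set up by the alias script further down):

# Controllers installed by clusterctl init
for ns in capi-system capm3-system rke2-bootstrap-system rke2-control-plane-system; do
  kubectl get pods -n "${ns}"
done
# The Ironic service should be exposed through the MetalLB pool created above
kubectl get svc -n metal3-system
kubectl get ipaddresspool,l2advertisement -n metallb-system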
50 changes: 50 additions & 0 deletions examples/edge-stack-iso/custom/files/rancher.sh
@@ -0,0 +1,50 @@
#!/bin/bash
set -euo pipefail

BASEDIR="$(dirname "$0")"
source ${BASEDIR}/basic-setup.sh

RANCHERLOCKNAMESPACE="default"
RANCHERLOCKCMNAME="rancher-lock"

if [ -z "${RANCHER_FINALPASSWORD}" ]; then
  # If there is no final password, there is nothing to configure, so exit right away
  exit 0
fi

# Get or create the lock so these steps run on a single node only
# As the first node is created way before the others, this should be enough
# TODO: Investigate whether a Lease would be a better fit
if [ $(${KUBECTL} get cm -n ${RANCHERLOCKNAMESPACE} ${RANCHERLOCKCMNAME} -o name | wc -l) -lt 1 ]; then
  ${KUBECTL} create configmap ${RANCHERLOCKCMNAME} -n ${RANCHERLOCKNAMESPACE} --from-literal foo=bar
else
  exit 0
fi

# Wait for rancher to be deployed
while ! ${KUBECTL} wait --for condition=ready -n ${RANCHER_CHART_TARGETNAMESPACE} $(${KUBECTL} get pods -n ${RANCHER_CHART_TARGETNAMESPACE} -l app=rancher -o name) --timeout=10s; do sleep 2 ; done
until ${KUBECTL} get ingress -n ${RANCHER_CHART_TARGETNAMESPACE} rancher > /dev/null 2>&1; do sleep 10; done

RANCHERBOOTSTRAPPASSWORD=$(${KUBECTL} get secret -n ${RANCHER_CHART_TARGETNAMESPACE} bootstrap-secret -o jsonpath='{.data.bootstrapPassword}' | base64 -d)
RANCHERHOSTNAME=$(${KUBECTL} get ingress -n ${RANCHER_CHART_TARGETNAMESPACE} rancher -o jsonpath='{.spec.rules[0].host}')

# Add the protocol
RANCHERHOSTNAME="https://${RANCHERHOSTNAME}"
TOKEN=""
while [ -z "${TOKEN}" ]; do
  # Get the login token
  sleep 2
  TOKEN=$(curl -sk -X POST ${RANCHERHOSTNAME}/v3-public/localProviders/local?action=login -H 'content-type: application/json' -d "{\"username\":\"admin\",\"password\":\"${RANCHERBOOTSTRAPPASSWORD}\"}" | jq -r .token)
done

# Set password
curl -sk ${RANCHERHOSTNAME}/v3/users?action=changepassword -H 'content-type: application/json' -H "Authorization: Bearer $TOKEN" -d "{\"currentPassword\":\"${RANCHERBOOTSTRAPPASSWORD}\",\"newPassword\":\"${RANCHER_FINALPASSWORD}\"}"

# Create a temporary API token (ttl=60 minutes)
APITOKEN=$(curl -sk ${RANCHERHOSTNAME}/v3/token -H 'content-type: application/json' -H "Authorization: Bearer ${TOKEN}" -d '{"type":"token","description":"automation","ttl":3600000}' | jq -r .token)

curl -sk ${RANCHERHOSTNAME}/v3/settings/server-url -H 'content-type: application/json' -H "Authorization: Bearer ${APITOKEN}" -X PUT -d "{\"name\":\"server-url\",\"value\":\"${RANCHERHOSTNAME}\"}"
curl -sk ${RANCHERHOSTNAME}/v3/settings/telemetry-opt -X PUT -H 'content-type: application/json' -H 'accept: application/json' -H "Authorization: Bearer ${APITOKEN}" -d '{"value":"out"}'

# Clean up the lock cm
${KUBECTL} delete configmap ${RANCHERLOCKCMNAME} -n ${RANCHERLOCKNAMESPACE}
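
To verify that the password rotation and the server-url change took effect, the same local-provider login endpoint used above can be queried with the final password (a sketch; the ingress lookup mirrors the script and the password is the RANCHER_FINALPASSWORD value from basic-setup.sh):

# Log in with the final admin password and read back the server-url setting
RANCHERHOSTNAME="https://$(kubectl get ingress -n cattle-system rancher -o jsonpath='{.spec.rules[0].host}')"
TOKEN=$(curl -sk -X POST "${RANCHERHOSTNAME}/v3-public/localProviders/local?action=login" \
  -H 'content-type: application/json' \
  -d '{"username":"admin","password":"adminadminadmin"}' | jq -r .token)
curl -sk "${RANCHERHOSTNAME}/v3/settings/server-url" -H "Authorization: Bearer ${TOKEN}" | jq -r .value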
4 changes: 4 additions & 0 deletions examples/edge-stack-iso/custom/scripts/99-alias.sh
@@ -0,0 +1,4 @@
#!/bin/bash
echo "alias k=kubectl" >> /etc/profile.local
echo "alias kubectl=/var/lib/rancher/rke2/bin/kubectl" >> /etc/profile.local
echo "export KUBECONFIG=/etc/rancher/rke2/rke2.yaml" >> /etc/profile.local
17 changes: 17 additions & 0 deletions examples/edge-stack-iso/custom/scripts/99_edge-setup.sh
@@ -0,0 +1,17 @@
#!/bin/bash

# Copy the basic setup script from combustion to the final location
mkdir -p /opt/edge/bin/
cp basic-setup.sh /opt/edge/bin/
chmod a+x /opt/edge/bin/basic-setup.sh

# Same for rancher
cp rancher.sh /opt/edge/bin/
chmod a+x /opt/edge/bin/rancher.sh
# Same for metal3
cp metal3.sh /opt/edge/bin/
chmod a+x /opt/edge/bin/metal3.sh

# Copy the systemd unit file
cp edge-stack-setup.service /etc/systemd/system/edge-stack-setup.service
systemctl enable edge-stack-setup.service