From f0580014cb7a37f1466e7329abd4d63eb09d5afa Mon Sep 17 00:00:00 2001 From: Vladimir Nadvornik Date: Fri, 24 May 2024 14:47:50 +0200 Subject: [PATCH 01/19] Kubernetes support for Hub XML-RPC --- mgradm/cmd/inspect/kubernetes.go | 3 +- mgradm/cmd/inspect/podman.go | 9 ++-- mgradm/cmd/install/kubernetes/utils.go | 3 +- mgradm/cmd/migrate/kubernetes/utils.go | 25 +++++++++-- mgradm/cmd/scale/podman.go | 2 +- mgradm/cmd/upgrade/kubernetes/utils.go | 4 +- mgradm/shared/kubernetes/install.go | 54 ++++++++++++++++++++--- mgradm/shared/kubernetes/k3s.go | 9 ++-- mgradm/shared/utils/exec.go | 32 -------------- shared/kubernetes/k3s.go | 4 +- shared/kubernetes/utils.go | 15 +++++++ uyuni-tools.changes.nadvornik.hub-xmlrpc2 | 1 + 12 files changed, 104 insertions(+), 57 deletions(-) create mode 100644 uyuni-tools.changes.nadvornik.hub-xmlrpc2 diff --git a/mgradm/cmd/inspect/kubernetes.go b/mgradm/cmd/inspect/kubernetes.go index eaf363872..89d7f35c8 100644 --- a/mgradm/cmd/inspect/kubernetes.go +++ b/mgradm/cmd/inspect/kubernetes.go @@ -13,7 +13,6 @@ import ( "github.com/rs/zerolog/log" "github.com/spf13/cobra" - adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" "github.com/uyuni-project/uyuni-tools/shared" shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes" . "github.com/uyuni-project/uyuni-tools/shared/l10n" @@ -36,7 +35,7 @@ func kuberneteInspect( if len(serverImage) <= 0 { log.Debug().Msg("Use deployed image") - serverImage, err = adm_utils.RunningImage(cnx) + serverImage, err = shared_kubernetes.GetRunningImage("uyuni") if err != nil { return errors.New(L("failed to find the image of the currently running server container: %s")) } diff --git a/mgradm/cmd/inspect/podman.go b/mgradm/cmd/inspect/podman.go index 9202fb349..90f85c5ac 100644 --- a/mgradm/cmd/inspect/podman.go +++ b/mgradm/cmd/inspect/podman.go @@ -9,10 +9,8 @@ import ( "github.com/rs/zerolog/log" "github.com/spf13/cobra" - adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - "github.com/uyuni-project/uyuni-tools/shared" . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" - shared_podman "github.com/uyuni-project/uyuni-tools/shared/podman" + "github.com/uyuni-project/uyuni-tools/shared/podman" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) @@ -31,13 +29,12 @@ func podmanInspect( if len(serverImage) <= 0 { log.Debug().Msg("Use deployed image") - cnx := shared.NewConnection("podman", shared_podman.ServerContainerName, "") - serverImage, err = adm_utils.RunningImage(cnx) + serverImage, err = podman.GetRunningImage(podman.ServerContainerName) if err != nil { return utils.Errorf(err, L("failed to find the image of the currently running server container")) } } - inspectResult, err := shared_podman.Inspect(serverImage, flags.Image.PullPolicy, flags.SCC) + inspectResult, err := podman.Inspect(serverImage, flags.Image.PullPolicy, flags.SCC) if err != nil { return utils.Errorf(err, L("inspect command failed")) } diff --git a/mgradm/cmd/install/kubernetes/utils.go b/mgradm/cmd/install/kubernetes/utils.go index ff0b56514..a58fb6553 100644 --- a/mgradm/cmd/install/kubernetes/utils.go +++ b/mgradm/cmd/install/kubernetes/utils.go @@ -74,7 +74,8 @@ func installForKubernetes(_ *types.GlobalFlags, } // Deploy Uyuni and wait for it to be up - if err := kubernetes.Deploy(cnx, flags.Image.Registry, &flags.Image, &flags.Helm, + if err := kubernetes.Deploy( + cnx, flags.Image.Registry, &flags.Image, &flags.HubXmlrpc, &flags.Helm, clusterInfos, fqdn, flags.Debug.Java, false, helmArgs..., ); err != nil { return shared_utils.Errorf(err, L("cannot deploy uyuni")) diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go index 22632156e..c2babd598 100644 --- a/mgradm/cmd/migrate/kubernetes/utils.go +++ b/mgradm/cmd/migrate/kubernetes/utils.go @@ -48,6 +48,12 @@ func migrateToKubernetes( return utils.Errorf(err, L("failed to compute image URL")) } + hubXmlrpcImage := "" + hubXmlrpcImage, err = utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.HubXmlrpc.Image) + if err != nil { + return err + } + fqdn := args[0] if err := utils.IsValidFQDN(fqdn); err != nil { return err @@ -89,8 +95,9 @@ func migrateToKubernetes( "--set", "migration.dataPath="+scriptDir, ) - if err := kubernetes.Deploy(cnx, flags.Image.Registry, &flags.Image, &flags.Helm, - clusterInfos, fqdn, false, flags.Prepare, migrationArgs...); err != nil { + if err := kubernetes.Deploy(cnx, flags.Image.Registry, &flags.Image, &flags.HubXmlrpc, + &flags.Helm, clusterInfos, fqdn, false, flags.Prepare, migrationArgs..., + ); err != nil { return utils.Errorf(err, L("cannot run deploy")) } @@ -145,8 +152,10 @@ func migrateToKubernetes( helmArgs = append(helmArgs, setupSSLArray...) 
// Run uyuni upgrade using the new ssl certificate + // We don't need to start the Hub XML-RPC API containers during the setup phase if err = kubernetes.UyuniUpgrade( - serverImage, flags.Image.PullPolicy, &flags.Helm, kubeconfig, fqdn, clusterInfos.Ingress, helmArgs..., + serverImage, flags.Image.PullPolicy, 0, hubXmlrpcImage, &flags.Helm, + kubeconfig, fqdn, clusterInfos.Ingress, helmArgs..., ); err != nil { return utils.Errorf(err, L("cannot upgrade helm chart to image %s using new SSL certificate"), serverImage) } @@ -182,8 +191,16 @@ func migrateToKubernetes( return utils.Errorf(err, L("cannot run post upgrade script")) } + hubReplicas := flags.HubXmlrpc.Replicas + if extractedData.HasHubXmlrpcAPI { + log.Info().Msg(L("Enabling Hub XML-RPC API since it is enabled on the migrated server")) + hubReplicas = 1 + } + + // This is the final deployment, all the replicas need to be correct here. if err := kubernetes.UyuniUpgrade( - serverImage, flags.Image.PullPolicy, &flags.Helm, kubeconfig, fqdn, clusterInfos.Ingress, helmArgs..., + serverImage, flags.Image.PullPolicy, hubReplicas, hubXmlrpcImage, &flags.Helm, kubeconfig, + fqdn, clusterInfos.Ingress, helmArgs..., ); err != nil { return utils.Errorf(err, L("cannot upgrade to image %s"), serverImage) } diff --git a/mgradm/cmd/scale/podman.go b/mgradm/cmd/scale/podman.go index 100d1c2f0..d40df4e33 100644 --- a/mgradm/cmd/scale/podman.go +++ b/mgradm/cmd/scale/podman.go @@ -31,7 +31,7 @@ func podmanScale( } if service == podman.HubXmlrpcService { if newReplicas > 1 { - return errors.New(L("Multiple Hub XML-RPC container replicas are not currently supported.")) + return errors.New(L("Multiple Hub XML-RPC API container replicas are not currently supported.")) } return systemd.ScaleService(newReplicas, service) } diff --git a/mgradm/cmd/upgrade/kubernetes/utils.go b/mgradm/cmd/upgrade/kubernetes/utils.go index 6b7a05af1..161abd297 100644 --- a/mgradm/cmd/upgrade/kubernetes/utils.go +++ b/mgradm/cmd/upgrade/kubernetes/utils.go @@ -18,5 +18,7 @@ func upgradeKubernetes( cmd *cobra.Command, args []string, ) error { - return kubernetes.Upgrade(globalFlags, &flags.Image, &flags.DBUpgradeImage, flags.Helm, cmd, args) + return kubernetes.Upgrade( + globalFlags, &flags.UpgradeFlags.Image, &flags.DBUpgradeImage, &flags.HubXmlrpc.Image, flags.Helm, cmd, args, + ) } diff --git a/mgradm/shared/kubernetes/install.go b/mgradm/shared/kubernetes/install.go index 0fb54fdc6..461bef4aa 100644 --- a/mgradm/shared/kubernetes/install.go +++ b/mgradm/shared/kubernetes/install.go @@ -28,6 +28,7 @@ func Deploy( cnx *shared.Connection, registry string, imageFlags *types.ImageFlags, + hubXmlrpcFlags *cmd_utils.HubXmlrpcFlags, helmFlags *cmd_utils.HelmFlags, clusterInfos *kubernetes.ClusterInfos, fqdn string, @@ -39,10 +40,11 @@ func Deploy( isK3s := clusterInfos.IsK3s() IsRke2 := clusterInfos.IsRke2() if !prepare { + tcpPorts, udpPorts := GetPortLists(hubXmlrpcFlags.Replicas > 0, debug) if isK3s { - InstallK3sTraefikConfig(debug) + kubernetes.InstallK3sTraefikConfig(tcpPorts, udpPorts) } else if IsRke2 { - kubernetes.InstallRke2NginxConfig(utils.TCPPorts, utils.UDPPorts, helmFlags.Uyuni.Namespace) + kubernetes.InstallRke2NginxConfig(tcpPorts, udpPorts, helmFlags.Uyuni.Namespace) } } @@ -51,9 +53,15 @@ func Deploy( return utils.Errorf(err, L("failed to compute image URL")) } + hubXmlrpcImage, err := utils.ComputeImage(registry, imageFlags.Tag, hubXmlrpcFlags.Image) + if err != nil { + return utils.Errorf(err, L("failed to compute image URL")) + } + // Install the uyuni 
server helm chart if err := UyuniUpgrade( - serverImage, imageFlags.PullPolicy, helmFlags, clusterInfos.GetKubeconfig(), fqdn, clusterInfos.Ingress, helmArgs..., + serverImage, imageFlags.PullPolicy, hubXmlrpcFlags.Replicas, hubXmlrpcImage, helmFlags, + clusterInfos.GetKubeconfig(), fqdn, clusterInfos.Ingress, helmArgs..., ); err != nil { return utils.Errorf(err, L("cannot upgrade")) } @@ -107,8 +115,17 @@ func DeployExistingCertificate( } // UyuniUpgrade runs an helm upgrade using images and helm configuration as parameters. -func UyuniUpgrade(serverImage string, pullPolicy string, helmFlags *cmd_utils.HelmFlags, kubeconfig string, - fqdn string, ingress string, helmArgs ...string) error { +func UyuniUpgrade( + serverImage string, + pullPolicy string, + hubXmlrpcReplicas int, + hubXmlrpcImage string, + helmFlags *cmd_utils.HelmFlags, + kubeconfig string, + fqdn string, + ingress string, + helmArgs ...string, +) error { log.Info().Msg(L("Installing Uyuni")) // The guessed ingress is passed before the user's value to let the user override it in case we got it wrong. @@ -127,6 +144,12 @@ func UyuniUpgrade(serverImage string, pullPolicy string, helmFlags *cmd_utils.He "--set", "pullPolicy="+kubernetes.GetPullPolicy(pullPolicy), "--set", "fqdn="+fqdn) + if hubXmlrpcReplicas > 0 { + log.Info().Msg(L("Enabling Hub XMLRPC API container.")) + helmParams = append(helmParams, + "--set", fmt.Sprintf("hub.api.replicas=%v", hubXmlrpcReplicas), + "--set", "images.hub_xmlrpc="+hubXmlrpcImage) + } helmParams = append(helmParams, helmArgs...) namespace := helmFlags.Uyuni.Namespace @@ -140,6 +163,7 @@ func Upgrade( _ *types.GlobalFlags, image *types.ImageFlags, upgradeImage *types.ImageFlags, + hubXmlrpcImage *types.ImageFlags, helm cmd_utils.HelmFlags, _ *cobra.Command, _ []string, @@ -156,6 +180,11 @@ func Upgrade( return utils.Errorf(err, L("failed retrieving namespace")) } + origHubXmlrpcImage, err := kubernetes.GetRunningImage("hub-xmlrpc-api") + if err != nil { + return utils.Errorf(err, L("failed to find Hub XML-RPC API container")) + } + serverImage, err := utils.ComputeImage(image.Registry, utils.DefaultTag, *image) if err != nil { return utils.Errorf(err, L("failed to compute image URL")) @@ -238,7 +267,20 @@ func Upgrade( helmArgs = append(helmArgs, "--set", "registrySecret="+pullSecret) } - err = UyuniUpgrade(serverImage, image.PullPolicy, &helm, kubeconfig, fqdn, clusterInfos.Ingress, helmArgs...) + hubXmlrpcImageName, err := utils.ComputeImage(image.Registry, image.Tag, *hubXmlrpcImage) + if err != nil { + return utils.Errorf(err, L("failed to compute image URL")) + } + + hubXmlrpcReplicas := 0 + if origHubXmlrpcImage != "" { + hubXmlrpcReplicas = 1 + } + + err = UyuniUpgrade( + serverImage, image.PullPolicy, hubXmlrpcReplicas, hubXmlrpcImageName, &helm, kubeconfig, fqdn, + clusterInfos.Ingress, helmArgs..., + ) if err != nil { return utils.Errorf(err, L("cannot upgrade to image %s"), serverImage) } diff --git a/mgradm/shared/kubernetes/k3s.go b/mgradm/shared/kubernetes/k3s.go index 2aceb20e7..81a4722f5 100644 --- a/mgradm/shared/kubernetes/k3s.go +++ b/mgradm/shared/kubernetes/k3s.go @@ -15,15 +15,18 @@ import ( "github.com/uyuni-project/uyuni-tools/shared/utils" ) -// InstallK3sTraefikConfig installs the K3s Traefik configuration. -func InstallK3sTraefikConfig(debug bool) { +// GetPortLists returns compiled lists of tcp and udp ports.. +func GetPortLists(hub bool, debug bool) ([]types.PortMap, []types.PortMap) { tcpPorts := []types.PortMap{} tcpPorts = append(tcpPorts, utils.TCPPorts...) 
if debug { tcpPorts = append(tcpPorts, utils.DebugPorts...) } + if hub { + tcpPorts = append(tcpPorts, utils.HubXmlrpcPorts...) + } - kubernetes.InstallK3sTraefikConfig(tcpPorts, utils.UDPPorts) + return tcpPorts, utils.UDPPorts } // RunPgsqlVersionUpgrade perform a PostgreSQL major upgrade. diff --git a/mgradm/shared/utils/exec.go b/mgradm/shared/utils/exec.go index 93dad92e0..d4cd9878b 100644 --- a/mgradm/shared/utils/exec.go +++ b/mgradm/shared/utils/exec.go @@ -15,7 +15,6 @@ import ( "github.com/rs/zerolog/log" "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" "github.com/uyuni-project/uyuni-tools/shared" - "github.com/uyuni-project/uyuni-tools/shared/kubernetes" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/utils" ) @@ -137,37 +136,6 @@ func GenerateMigrationScript(sourceFqdn string, user string, kubernetes bool, pr return scriptDir, cleaner, nil } -// RunningImage returns the image running in the current system. -func RunningImage(cnx *shared.Connection) (string, error) { - command, err := cnx.GetCommand() - - switch command { - case "podman": - args := []string{"ps", "--format", "{{.Image}}", "--noheading"} - image, err := utils.RunCmdOutput(zerolog.DebugLevel, "podman", args...) - if err != nil { - return "", err - } - return strings.Trim(string(image), "\n"), nil - - case "kubectl": - - // FIXME this will work until containers 0 is uyuni. Then jsonpath should be something like - // {.items[0].spec.containers[?(@.name=="` + containerName + `")].image but there are problems - // using RunCmdOutput with an arguments with round brackets - args := []string{"get", "pods", kubernetes.ServerFilter, "-o", "jsonpath={.items[0].spec.containers[0].image}"} - image, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", args...) - - log.Info().Msgf(L("Image is: %s"), image) - if err != nil { - return "", err - } - return strings.Trim(string(image), "\n"), nil - } - - return command, err -} - // SanityCheck verifies if an upgrade can be run. func SanityCheck(cnx *shared.Connection, inspectedValues *utils.ServerInspectData, serverImage string) error { isUyuni, err := isUyuni(cnx) diff --git a/shared/kubernetes/k3s.go b/shared/kubernetes/k3s.go index c394415b3..64227ba1f 100644 --- a/shared/kubernetes/k3s.go +++ b/shared/kubernetes/k3s.go @@ -44,10 +44,12 @@ func waitForTraefik() { if err == nil { completionTime, err := time.Parse(time.RFC3339, string(out)) if err == nil && time.Since(completionTime).Seconds() < 60 { - break + return } } + time.Sleep(1 * time.Second) } + log.Error().Msg(L("Failed to reload K3s Traefik")) } // UninstallK3sTraefikConfig uninstall K3s Traefik configuration. diff --git a/shared/kubernetes/utils.go b/shared/kubernetes/utils.go index 8c3f6cb84..005db256e 100644 --- a/shared/kubernetes/utils.go +++ b/shared/kubernetes/utils.go @@ -389,3 +389,18 @@ func GenerateOverrideDeployment(deployData types.Deployment) (string, error) { } return string(ret), nil } + +// GetRunningImage returns the image of containerName for the server running in the current system. +func GetRunningImage(containerName string) (string, error) { + args := []string{ + "get", "pods", "-A", ServerFilter, + "-o", "jsonpath={.items[0].spec.containers[?(@.name=='" + containerName + "')].image}", + } + image, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", args...) 
+
+	log.Debug().Msgf("%[1]s container image is: %[2]s", containerName, image)
+	if err != nil {
+		return "", err
+	}
+	return strings.Trim(string(image), "\n"), nil
+}
diff --git a/uyuni-tools.changes.nadvornik.hub-xmlrpc2 b/uyuni-tools.changes.nadvornik.hub-xmlrpc2
new file mode 100644
index 000000000..7202a3c2c
--- /dev/null
+++ b/uyuni-tools.changes.nadvornik.hub-xmlrpc2
@@ -0,0 +1 @@
+- Handle Hub XML-RPC during migration and upgrade and add Kubernetes support

From 1f7624b5f7d2c88b031e9dff3e66bd70f67bf50b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Wed, 7 Aug 2024 11:49:44 +0200
Subject: [PATCH 02/19] Back up the pgsql data after rsyncing them

During a migration to Kubernetes the server is deployed after the rsync to
prepare the SSL secrets and PVC. This has the nasty effect of corrupting the
synchronized data with a too recent catalog version ID, which would make the
DB migration fail when starting the old postgresql server.

To work around this, move the data to the backup place at the end of the
rsync instead of at the beginning of the db upgrade.
---
 mgradm/shared/templates/migrateScriptTemplate.go | 15 +++++++++++++--
 .../pgsqlVersionUpgradeScriptTemplate.go | 7 +++++--
 2 files changed, 18 insertions(+), 4 deletions(-)

diff --git a/mgradm/shared/templates/migrateScriptTemplate.go b/mgradm/shared/templates/migrateScriptTemplate.go
index 67abc912a..a7d4f924a 100644
--- a/mgradm/shared/templates/migrateScriptTemplate.go
+++ b/mgradm/shared/templates/migrateScriptTemplate.go
@@ -126,8 +126,10 @@ echo "Extracting time zone..."
 $SSH {{ .SourceFqdn }} timedatectl show -p Timezone >/var/lib/uyuni-tools/data
 
 echo "Extracting postgresql versions..."
-echo "image_pg_version=$(rpm -qa --qf '%{VERSION}\n' 'name=postgresql[0-8][0-9]-server' | cut -d. -f1 | sort -n | tail -1)" >> /var/lib/uyuni-tools/data
-echo "current_pg_version=$(cat /var/lib/pgsql/data/PG_VERSION)" >> /var/lib/uyuni-tools/data
+image_pg_version=$(rpm -qa --qf '%{VERSION}\n' 'name=postgresql[0-8][0-9]-server' | cut -d. -f1 | sort -n | tail -1)
+current_pg_version=$(cat /var/lib/pgsql/data/PG_VERSION)
+echo "image_pg_version=$image_pg_version" >> /var/lib/uyuni-tools/data
+echo "current_pg_version=$current_pg_version" >> /var/lib/uyuni-tools/data
 
 grep '^db_user' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
 grep '^db_password' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
@@ -151,6 +153,15 @@ sed 's/--add-modules java.annotation,com.sun.xml.bind://' -i /etc/tomcat/conf.d/
 sed 's/-XX:-UseConcMarkSweepGC//' -i /etc/tomcat/conf.d/*
 test -f /etc/tomcat/conf.d/remote_debug.conf && sed 's/address=[^:]*:/address=*:/' -i /etc/tomcat/conf.d/remote_debug.conf
 
+# Create a backup copy of the data to prepare DB upgrade.
+# We need to upgrade the deployment before upgrading the database to get the SSL certificates ready.
+# To avoid corrupting the database files, move them to where the upgrade script will expect them.
+echo "Postgresql versions: image: $image_pg_version, current: $current_pg_version"
+if test "$image_pg_version" != "$current_pg_version"; then
+  echo "Backing up the database files ..."
+ mv /var/lib/pgsql/data /var/lib/pgsql/data-pg$current_pg_version +fi + {{ if .Kubernetes }} echo 'server.no_ssl = 1' >> /etc/rhn/rhn.conf; echo "Extracting SSL certificate and authority" diff --git a/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go b/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go index 7758820a3..a4e188bd6 100644 --- a/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go +++ b/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go @@ -23,8 +23,9 @@ test -d /usr/lib/postgresql$NEW_VERSION/bin echo "Testing presence of postgresql$OLD_VERSION..." test -d /usr/lib/postgresql$OLD_VERSION/bin -echo "Create a backup at /var/lib/pgsql/data-pg$OLD_VERSION..." -mv /var/lib/pgsql/data /var/lib/pgsql/data-pg$OLD_VERSION +# Data have already been backed up at the end of the migration script +# Reset the potentially created new pgsql data +rm -rf /var/lib/pgsql/data echo "Create new database directory..." mkdir -p /var/lib/pgsql/data chown -R postgres:postgres /var/lib/pgsql @@ -45,6 +46,8 @@ echo "Running initdb using postgres user" echo "Any suggested command from the console should be run using postgres user" su -s /bin/bash - postgres -c "initdb -D /var/lib/pgsql/data --locale=$POSTGRES_LANG" echo "Successfully initialized new postgresql $NEW_VERSION database." + + su -s /bin/bash - postgres -c "pg_upgrade --old-bindir=/usr/lib/postgresql$OLD_VERSION/bin --new-bindir=/usr/lib/postgresql$NEW_VERSION/bin --old-datadir=/var/lib/pgsql/data-pg$OLD_VERSION --new-datadir=/var/lib/pgsql/data $FAST_UPGRADE" cp /var/lib/pgsql/data-pg$OLD_VERSION/pg_hba.conf /var/lib/pgsql/data From 250ad19f899105d06fbaf11bf467dccae8f0f300 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 7 Aug 2024 11:54:04 +0200 Subject: [PATCH 03/19] Refresh the connection after the k8s migration After the k8s migration the pod has been started again since the initial connection creation. We need to reset the connection to not search for the old pod name. --- mgradm/cmd/migrate/kubernetes/utils.go | 2 ++ mgradm/shared/kubernetes/install.go | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go index c2babd598..39b57f1e2 100644 --- a/mgradm/cmd/migrate/kubernetes/utils.go +++ b/mgradm/cmd/migrate/kubernetes/utils.go @@ -209,6 +209,8 @@ func migrateToKubernetes( return err } + // Reinitialize the connection since the pod name has changed since we first checked + cnx = shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) if err := cnx.CopyCaCertificate(fqdn); err != nil { return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates")) } diff --git a/mgradm/shared/kubernetes/install.go b/mgradm/shared/kubernetes/install.go index 461bef4aa..d13a1a303 100644 --- a/mgradm/shared/kubernetes/install.go +++ b/mgradm/shared/kubernetes/install.go @@ -74,7 +74,7 @@ func Deploy( return cnx.WaitForServer() } -// DeployCertificate executre a deploy a new certificate given an helm. +// DeployCertificate deploys a new SSL certificate. 
func DeployCertificate(helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_utils.InstallSSLFlags, rootCa string, ca *types.SSLPair, kubeconfig string, fqdn string, imagePullPolicy string) ([]string, error) { helmArgs := []string{} From f389b8c55d1790efa5388b729cf4dc7c832b5050 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 7 Aug 2024 11:55:36 +0200 Subject: [PATCH 04/19] Wait for 3 hours when running a pod Some pods require a long time to run. This is the case for the DB upgrade finalization that runs a potentially long reindex. --- shared/kubernetes/utils.go | 43 ++++---------------------------------- 1 file changed, 4 insertions(+), 39 deletions(-) diff --git a/shared/kubernetes/utils.go b/shared/kubernetes/utils.go index 005db256e..34df9518f 100644 --- a/shared/kubernetes/utils.go +++ b/shared/kubernetes/utils.go @@ -287,7 +287,10 @@ func RunPod( command string, override ...string, ) error { - arguments := []string{"run", podname, "-n", namespace, "--image", image, "--image-pull-policy", pullPolicy, filter} + arguments := []string{ + "run", "--rm", "-n", namespace, "--attach", "--pod-running-timeout=3h", "--restart=Never", podname, + "--image", image, "--image-pull-policy", pullPolicy, filter, + } if len(override) > 0 { arguments = append(arguments, `--override-type=strategic`) @@ -303,14 +306,6 @@ func RunPod( return utils.Errorf(err, PL("The first placeholder is a command", "cannot run %[1]s using image %[2]s"), command, image) } - err = waitForPod(namespace, podname) - if err != nil { - return utils.Errorf(err, L("deleting pod %s. Status fails with error"), podname) - } - - defer func() { - err = DeletePod(namespace, podname, filter) - }() return nil } @@ -332,36 +327,6 @@ func DeletePod(namespace string, podname string, filter string) error { return nil } -func waitForPod(namespace string, podname string) error { - status := "Succeeded" - waitSeconds := 120 - log.Debug().Msgf( - "Checking status for %s pod. Waiting %s seconds until status is %s", - podname, strconv.Itoa(waitSeconds), status, - ) - cmdArgs := []string{ - "get", "pod", podname, "-n", namespace, "--output=custom-columns=STATUS:.status.phase", "--no-headers", - } - var err error - for i := 0; i < waitSeconds; i++ { - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", cmdArgs...) - outStr := strings.TrimSuffix(string(out), "\n") - if err != nil { - return utils.Errorf(err, L("cannot execute %s"), strings.Join(cmdArgs, string(" "))) - } - if strings.EqualFold(outStr, status) { - log.Debug().Msgf("%s pod status is %s", podname, status) - return nil - } - if strings.EqualFold(outStr, "Failed") { - return utils.Errorf(err, L("error during execution of %s"), strings.Join(cmdArgs, string(" "))) - } - log.Debug().Msgf("Pod %s status is %s for %d seconds.", podname, outStr, i) - time.Sleep(1 * time.Second) - } - return utils.Errorf(err, L("pod %[1]s status is not %[2]s in %[3]d seconds"), podname, status, waitSeconds) -} - // GetNode return the node where the app is running. 
 func GetNode(namespace string, filter string) (string, error) {
 	nodeName := ""

From 0a8ee8d8f75a018891b812489f7ef28d301ad8e5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Thu, 8 Aug 2024 12:21:06 +0200
Subject: [PATCH 05/19] Add crds.keep value for cert-manager to keep feature
 parity with installCRDs

---
 mgradm/shared/kubernetes/certificates.go | 1 +
 1 file changed, 1 insertion(+)

diff --git a/mgradm/shared/kubernetes/certificates.go b/mgradm/shared/kubernetes/certificates.go
index c8ecdfe9b..91159c7c2 100644
--- a/mgradm/shared/kubernetes/certificates.go
+++ b/mgradm/shared/kubernetes/certificates.go
@@ -113,6 +113,7 @@ func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, image
 	args := []string{
 		"--set", "crds.enabled=true",
+		"--set", "crds.keep=true",
 		"--set-json", "global.commonLabels={\"installedby\": \"mgradm\"}",
 		"--set", "image.pullPolicy=" + kubernetes.GetPullPolicy(imagePullPolicy),
 	}

From 6cc93acca423bb757f351292eb4656c507dce4cf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Thu, 5 Sep 2024 16:09:39 +0200
Subject: [PATCH 06/19] Reorganize the kubernetes SSL CA deployment code for
 readability

One of the issuer creation functions had two distinct behaviors, and this
was only generating confusion when reading the whole code. This function has
been split and some useless intermediary functions have been merged.

This, together with better function naming, should make the SSL setup code
more understandable.
---
 mgradm/cmd/install/kubernetes/utils.go | 20 ++-
 mgradm/cmd/migrate/kubernetes/utils.go | 4 +-
 mgradm/shared/kubernetes/certificates.go | 141 ++++++++++++++----
 mgradm/shared/kubernetes/install.go | 41 -----
 mgradm/shared/templates/issuerTemplate.go | 59 ++------
 .../templates/reusedCaIssuerTemplate.go | 46 ++++++
 6 files changed, 186 insertions(+), 125 deletions(-)
 create mode 100644 mgradm/shared/templates/reusedCaIssuerTemplate.go

diff --git a/mgradm/cmd/install/kubernetes/utils.go b/mgradm/cmd/install/kubernetes/utils.go
index a58fb6553..8a2119ba0 100644
--- a/mgradm/cmd/install/kubernetes/utils.go
+++ b/mgradm/cmd/install/kubernetes/utils.go
@@ -59,13 +59,21 @@ func installForKubernetes(_ *types.GlobalFlags,
 	}
 
 	// Deploy the SSL CA or server certificate
-	ca := types.SSLPair{}
-	sslArgs, err := kubernetes.DeployCertificate(&flags.Helm, &flags.SSL, "", &ca, clusterInfos.GetKubeconfig(), fqdn,
-		flags.Image.PullPolicy)
-	if err != nil {
-		return shared_utils.Errorf(err, L("cannot deploy certificate"))
+	if flags.SSL.UseExisting() {
+		if err := kubernetes.DeployExistingCertificate(&flags.Helm, &flags.SSL); err != nil {
+			return err
+		}
+	} else {
+		sslArgs, err := kubernetes.DeployCertificate(
+			&flags.Helm, &flags.SSL, clusterInfos.GetKubeconfig(), fqdn,
+			flags.Image.PullPolicy,
+		)
+
+		if err != nil {
+			return shared_utils.Errorf(err, L("cannot deploy certificate"))
+		}
+		helmArgs = append(helmArgs, sslArgs...)
 	// Create a secret using SCC credentials if any are provided
 	helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Uyuni.Namespace, &flags.SCC)
diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go
index 39b57f1e2..d0683002f 100644
--- a/mgradm/cmd/migrate/kubernetes/utils.go
+++ b/mgradm/cmd/migrate/kubernetes/utils.go
@@ -241,9 +241,7 @@ func setupSSL(
 	cert := base64.StdEncoding.EncodeToString(out)
 	ca := types.SSLPair{Cert: cert, Key: key}
 
-	// An empty struct means no third party certificate
-	sslFlags := adm_utils.InstallSSLFlags{}
-	ret, err := kubernetes.DeployCertificate(helm, &sslFlags, cert, &ca, kubeconfig, "", pullPolicy)
+	ret, err := kubernetes.DeployReusedCa(helm, &ca, kubeconfig, pullPolicy)
 	if err != nil {
 		return []string{}, utils.Errorf(err, L("cannot deploy certificate"))
 	}
diff --git a/mgradm/shared/kubernetes/certificates.go b/mgradm/shared/kubernetes/certificates.go
index 91159c7c2..7dd36936c 100644
--- a/mgradm/shared/kubernetes/certificates.go
+++ b/mgradm/shared/kubernetes/certificates.go
@@ -6,6 +6,8 @@ package kubernetes
 
 import (
 	"encoding/base64"
+	"errors"
+	"os"
 	"path/filepath"
 	"time"
 
@@ -15,21 +17,33 @@ import (
 	cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
 	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/ssl"
 	"github.com/uyuni-project/uyuni-tools/shared/types"
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
 )
 
-func installTLSSecret(namespace string, serverCrt []byte, serverKey []byte, rootCaCrt []byte) error {
-	crdsDir, cleaner, err := utils.TempDir()
+// Helm annotation to add in order to use cert-manager's uyuni CA issuer, in JSON format.
+const ingressCertManagerAnnotation = "ingressSSLAnnotations={\"cert-manager.io/issuer\": \"uyuni-ca-issuer\"}"
+
+// DeployExistingCertificate deploys an existing certificate.
+func DeployExistingCertificate(
+	helmFlags *cmd_utils.HelmFlags,
+	sslFlags *cmd_utils.InstallSSLFlags,
+) error {
+	// Deploy the SSL Certificate secret and CA configmap
+	serverCrt, rootCaCrt := ssl.OrderCas(&sslFlags.Ca, &sslFlags.Server)
+	serverKey := utils.ReadFile(sslFlags.Server.Key)
+
+	tempDir, cleaner, err := utils.TempDir()
 	if err != nil {
 		return err
 	}
 	defer cleaner()
 
-	secretPath := filepath.Join(crdsDir, "secret.yaml")
+	secretPath := filepath.Join(tempDir, "secret.yaml")
 	log.Info().Msg(L("Creating SSL server certificate secret"))
 	tlsSecretData := templates.TLSSecretTemplateData{
-		Namespace:   namespace,
+		Namespace:   helmFlags.Uyuni.Namespace,
 		Name:        "uyuni-cert",
 		Certificate: base64.StdEncoding.EncodeToString(serverCrt),
 		Key:         base64.StdEncoding.EncodeToString(serverKey),
@@ -44,41 +58,94 @@ func installTLSSecret(namespace string, serverCrt []byte, serverKey []byte, root
 		return utils.Errorf(err, L("Failed to create uyuni-crt TLS secret"))
 	}
 
-	createCaConfig(namespace, rootCaCrt)
+	// Copy the CA cert into uyuni-ca config map as the container shouldn't have the CA secret
+	createCaConfig(helmFlags.Uyuni.Namespace, rootCaCrt)
 	return nil
 }
 
-// Install cert-manager and its CRDs using helm in the cert-manager namespace if needed
-// and then create a self-signed CA and issuers.
-// Returns helm arguments to be added to use the issuer.
-func installSSLIssuers(helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_utils.InstallSSLFlags, rootCa string,
-	tlsCert *types.SSLPair, kubeconfig, fqdn string, imagePullPolicy string) ([]string, error) {
+// DeployReusedCa deploys an existing SSL CA using cert-manager.
+func DeployReusedCa(
+	helmFlags *cmd_utils.HelmFlags,
+	ca *types.SSLPair,
+	kubeconfig string,
+	imagePullPolicy string,
+) ([]string, error) {
+	helmArgs := []string{}
+
+	// Install cert-manager if needed
 	if err := installCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil {
 		return []string{}, utils.Errorf(err, L("cannot install cert manager"))
 	}
 
-	log.Info().Msg(L("Creating SSL certificate issuer"))
-	crdsDir, cleaner, err := utils.TempDir()
+	log.Info().Msg(L("Creating cert-manager issuer for existing CA"))
+	tempDir, cleaner, err := utils.TempDir()
 	if err != nil {
 		return []string{}, err
 	}
 	defer cleaner()
 
-	issuerPath := filepath.Join(crdsDir, "issuer.yaml")
+	issuerPath := filepath.Join(tempDir, "issuer.yaml")
 
-	issuerData := templates.IssuerTemplateData{
+	issuerData := templates.ReusedCaIssuerTemplateData{
 		Namespace:   helmFlags.Uyuni.Namespace,
-		Country:     sslFlags.Country,
-		State:       sslFlags.State,
-		City:        sslFlags.City,
-		Org:         sslFlags.Org,
-		OrgUnit:     sslFlags.OU,
-		Email:       sslFlags.Email,
-		Fqdn:        fqdn,
-		RootCa:      rootCa,
-		Key:         tlsCert.Key,
-		Certificate: tlsCert.Cert,
+		Key:         ca.Key,
+		Certificate: ca.Cert,
+	}
+
+	if err = utils.WriteTemplateToFile(issuerData, issuerPath, 0500, true); err != nil {
+		return []string{}, utils.Errorf(err, L("failed to generate issuer definition"))
+	}
+
+	err = utils.RunCmd("kubectl", "apply", "-f", issuerPath)
+	if err != nil {
+		log.Fatal().Err(err).Msg(L("Failed to create issuer"))
+	}
+
+	// Wait for issuer to be ready
+	if err := waitForIssuer(helmFlags.Uyuni.Namespace, "uyuni-ca-issuer"); err != nil {
+		return nil, err
+	}
+	helmArgs = append(helmArgs, "--set-json", ingressCertManagerAnnotation)
+
+	// Copy the CA cert into uyuni-ca config map as the container shouldn't have the CA secret
+	createCaConfig(helmFlags.Uyuni.Namespace, []byte(ca.Cert))
+
+	return helmArgs, nil
+}
+
+// DeployCertificate deploys a new SSL CA using cert-manager.
+func DeployCertificate( + helmFlags *cmd_utils.HelmFlags, + sslFlags *cmd_utils.InstallSSLFlags, + kubeconfig string, + fqdn string, + imagePullPolicy string, +) ([]string, error) { + helmArgs := []string{} + + // Install cert-manager if needed + if err := installCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil { + return []string{}, utils.Errorf(err, L("cannot install cert manager")) + } + + log.Info().Msg(L("Creating SSL certificate issuer")) + tempDir, err := os.MkdirTemp("", "mgradm-*") + if err != nil { + return []string{}, utils.Errorf(err, L("failed to create temporary directory")) + } + defer os.RemoveAll(tempDir) + + issuerPath := filepath.Join(tempDir, "issuer.yaml") + + issuerData := templates.GeneratedCaIssuerTemplateData{ + Namespace: helmFlags.Uyuni.Namespace, + Country: sslFlags.Country, + State: sslFlags.State, + City: sslFlags.City, + Org: sslFlags.Org, + OrgUnit: sslFlags.OU, + Email: sslFlags.Email, + Fqdn: fqdn, } if err = utils.WriteTemplateToFile(issuerData, issuerPath, 0500, true); err != nil { @@ -91,16 +158,32 @@ func installSSLIssuers(helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_utils.Insta } // Wait for issuer to be ready + if err := waitForIssuer(helmFlags.Uyuni.Namespace, "uyuni-ca-issuer"); err != nil { + return nil, err + } + helmArgs = append(helmArgs, "--set-json", ingressCertManagerAnnotation) + + // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret + extractCaCertToConfig(helmFlags.Uyuni.Namespace) + + return helmArgs, nil +} + +// Wait for issuer to be ready. +func waitForIssuer(namespace string, name string) error { for i := 0; i < 60; i++ { - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "get", "-o=jsonpath={.status.conditions[*].type}", - "issuer", "uyuni-ca-issuer", "-n", issuerData.Namespace) + out, err := utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", + "-o=jsonpath={.status.conditions[*].type}", + "-n", namespace, + "issuer", name, + ) if err == nil && string(out) == "Ready" { - return []string{"--set-json", "ingressSSLAnnotations={\"cert-manager.io/issuer\": \"uyuni-ca-issuer\"}"}, nil + return nil } time.Sleep(1 * time.Second) } - log.Fatal().Msg(L("Issuer didn't turn ready after 60s")) - return []string{}, nil + return errors.New(L("Issuer didn't turn ready after 60s")) } func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, imagePullPolicy string) error { diff --git a/mgradm/shared/kubernetes/install.go b/mgradm/shared/kubernetes/install.go index d13a1a303..f4c70a59a 100644 --- a/mgradm/shared/kubernetes/install.go +++ b/mgradm/shared/kubernetes/install.go @@ -15,7 +15,6 @@ import ( "github.com/uyuni-project/uyuni-tools/shared" "github.com/uyuni-project/uyuni-tools/shared/kubernetes" . "github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/ssl" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) @@ -74,46 +73,6 @@ func Deploy( return cnx.WaitForServer() } -// DeployCertificate deploys a new SSL certificate. 
-func DeployCertificate(helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_utils.InstallSSLFlags, rootCa string, - ca *types.SSLPair, kubeconfig string, fqdn string, imagePullPolicy string) ([]string, error) { - helmArgs := []string{} - if sslFlags.UseExisting() { - if err := DeployExistingCertificate(helmFlags, sslFlags); err != nil { - return helmArgs, err - } - } else { - // Install cert-manager and a self-signed issuer ready for use - issuerArgs, err := installSSLIssuers(helmFlags, sslFlags, rootCa, ca, kubeconfig, fqdn, imagePullPolicy) - if err != nil { - return []string{}, utils.Errorf(err, L("cannot install cert-manager and self-sign issuer")) - } - helmArgs = append(helmArgs, issuerArgs...) - - // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - extractCaCertToConfig(helmFlags.Uyuni.Namespace) - } - - return helmArgs, nil -} - -// DeployExistingCertificate execute a deploy of an existing certificate. -func DeployExistingCertificate( - helmFlags *cmd_utils.HelmFlags, - sslFlags *cmd_utils.InstallSSLFlags, -) error { - // Deploy the SSL Certificate secret and CA configmap - serverCrt, rootCaCrt := ssl.OrderCas(&sslFlags.Ca, &sslFlags.Server) - serverKey := utils.ReadFile(sslFlags.Server.Key) - if err := installTLSSecret(helmFlags.Uyuni.Namespace, serverCrt, serverKey, rootCaCrt); err != nil { - return err - } - - // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - extractCaCertToConfig(helmFlags.Uyuni.Namespace) - return nil -} - // UyuniUpgrade runs an helm upgrade using images and helm configuration as parameters. func UyuniUpgrade( serverImage string, diff --git a/mgradm/shared/templates/issuerTemplate.go b/mgradm/shared/templates/issuerTemplate.go index 733c9531c..614d20460 100644 --- a/mgradm/shared/templates/issuerTemplate.go +++ b/mgradm/shared/templates/issuerTemplate.go @@ -10,27 +10,7 @@ import ( ) // Deploy self-signed issuer or CA Certificate and key. -const issuerTemplate = ` -apiVersion: v1 -kind: Namespace -metadata: - name: {{ .Namespace }} - labels: - name: {{ .Namespace }} ---- -{{if and .Certificate .Key -}} -apiVersion: v1 -kind: Secret -type: kubernetes.io/tls -metadata: - name: uyuni-ca - namespace: {{ .Namespace }} -data: - ca.crt: {{ .RootCa }} - tls.crt: {{ .Certificate }} - tls.key: {{ .Key }} -{{- else }} -apiVersion: cert-manager.io/v1 +const generatedCaIssuerTemplate = `apiVersion: cert-manager.io/v1 kind: Issuer metadata: name: uyuni-issuer @@ -78,36 +58,23 @@ spec: name: uyuni-issuer kind: Issuer group: cert-manager.io -{{- end }} --- -apiVersion: cert-manager.io/v1 -kind: Issuer -metadata: - name: uyuni-ca-issuer - namespace: {{ .Namespace }} -spec: - ca: - secretName: - uyuni-ca ` -// IssuerTemplateData represents information used to create issuer file. -type IssuerTemplateData struct { - Namespace string - Country string - State string - City string - Org string - OrgUnit string - Email string - Fqdn string - RootCa string - Certificate string - Key string +// GeneratedCaIssuerTemplateData is a template to render cert-manager issuers for a generated self-signed CA. +type GeneratedCaIssuerTemplateData struct { + Namespace string + Country string + State string + City string + Org string + OrgUnit string + Email string + Fqdn string } // Render creates issuer file. 
-func (data IssuerTemplateData) Render(wr io.Writer) error {
-	t := template.Must(template.New("issuer").Parse(issuerTemplate))
+func (data GeneratedCaIssuerTemplateData) Render(wr io.Writer) error {
+	t := template.Must(template.New("issuer").Parse(generatedCaIssuerTemplate + uyuniCaIssuer))
 	return t.Execute(wr, data)
 }
diff --git a/mgradm/shared/templates/reusedCaIssuerTemplate.go b/mgradm/shared/templates/reusedCaIssuerTemplate.go
new file mode 100644
index 000000000..4bf514b5a
--- /dev/null
+++ b/mgradm/shared/templates/reusedCaIssuerTemplate.go
@@ -0,0 +1,46 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package templates
+
+import (
+	"io"
+	"text/template"
+)
+
+const uyuniCaIssuer = `apiVersion: cert-manager.io/v1
+kind: Issuer
+metadata:
+  name: uyuni-ca-issuer
+  namespace: {{ .Namespace }}
+spec:
+  ca:
+    secretName: uyuni-ca
+`
+
+const reusedCaIssuerTemplate = `apiVersion: v1
+kind: Secret
+type: kubernetes.io/tls
+metadata:
+  name: uyuni-ca
+  namespace: {{ .Namespace }}
+data:
+  ca.crt: {{ .Certificate }}
+  tls.crt: {{ .Certificate }}
+  tls.key: {{ .Key }}
+---
+`
+
+// ReusedCaIssuerTemplateData is a template to render cert-manager issuer from an existing root CA.
+type ReusedCaIssuerTemplateData struct {
+	Namespace   string
+	Certificate string
+	Key         string
+}
+
+// Render creates issuer file.
+func (data ReusedCaIssuerTemplateData) Render(wr io.Writer) error {
+	t := template.Must(template.New("issuer").Parse(reusedCaIssuerTemplate + uyuniCaIssuer))
+	return t.Execute(wr, data)
+}

From 4834630a130c81a9be54e75b7fcf6e2402bd2126 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Fri, 27 Sep 2024 13:33:18 +0200
Subject: [PATCH 07/19] Ports internal data description refactoring

In the Kubernetes world we need to link the ports to services. For now we
only have a TCP and a UDP service for the server and the same for the proxy,
but in the short term we will need more services to allow splitting into
multiple pods. This refactoring prepares for that split.
---
 mgradm/shared/kubernetes/install.go | 30 ++++--
 mgradm/shared/kubernetes/k3s.go | 12 +--
 mgradm/shared/podman/podman.go | 15 +--
 mgrpxy/cmd/install/kubernetes/utils.go | 9 +-
 mgrpxy/shared/podman/podman.go | 2 +-
 shared/kubernetes/k3s.go | 58 ++++++++--
 shared/kubernetes/k3sTraefikTemplate.go | 16 ++-
 shared/kubernetes/k3s_test.go | 26 +++++
 shared/kubernetes/rke2.go | 21 +++-
 shared/types/networks.go | 1 +
 shared/utils/ports.go | 134 ++++++++++++++++------
 shared/utils/ports_test.go | 22 ++++
 12 files changed, 259 insertions(+), 87 deletions(-)
 create mode 100644 shared/kubernetes/k3s_test.go
 create mode 100644 shared/utils/ports_test.go

diff --git a/mgradm/shared/kubernetes/install.go b/mgradm/shared/kubernetes/install.go
index f4c70a59a..c39a613df 100644
--- a/mgradm/shared/kubernetes/install.go
+++ b/mgradm/shared/kubernetes/install.go
@@ -22,6 +22,25 @@ import (
 // HelmAppName is the Helm application name.
 const HelmAppName = "uyuni"
 
+// DeployNodeConfig deploys configuration files on the node.
+func DeployNodeConfig( + namespace string, + clusterInfos *kubernetes.ClusterInfos, + needsHub bool, + debug bool, +) error { + // If installing on k3s, install the traefik helm config in manifests + isK3s := clusterInfos.IsK3s() + IsRke2 := clusterInfos.IsRke2() + ports := GetPortLists(needsHub, debug) + if isK3s { + return kubernetes.InstallK3sTraefikConfig(ports) + } else if IsRke2 { + return kubernetes.InstallRke2NginxConfig(ports, namespace) + } + return nil +} + // Deploy execute a deploy of a given image and helm to a cluster. func Deploy( cnx *shared.Connection, @@ -36,14 +55,11 @@ func Deploy( helmArgs ...string, ) error { // If installing on k3s, install the traefik helm config in manifests - isK3s := clusterInfos.IsK3s() - IsRke2 := clusterInfos.IsRke2() if !prepare { - tcpPorts, udpPorts := GetPortLists(hubXmlrpcFlags.Replicas > 0, debug) - if isK3s { - kubernetes.InstallK3sTraefikConfig(tcpPorts, udpPorts) - } else if IsRke2 { - kubernetes.InstallRke2NginxConfig(tcpPorts, udpPorts, helmFlags.Uyuni.Namespace) + if err := DeployNodeConfig( + helmFlags.Uyuni.Namespace, clusterInfos, hubXmlrpcFlags.Replicas > 0, debug, + ); err != nil { + return err } } diff --git a/mgradm/shared/kubernetes/k3s.go b/mgradm/shared/kubernetes/k3s.go index 81a4722f5..ca756fa9f 100644 --- a/mgradm/shared/kubernetes/k3s.go +++ b/mgradm/shared/kubernetes/k3s.go @@ -16,17 +16,13 @@ import ( ) // GetPortLists returns compiled lists of tcp and udp ports.. -func GetPortLists(hub bool, debug bool) ([]types.PortMap, []types.PortMap) { - tcpPorts := []types.PortMap{} - tcpPorts = append(tcpPorts, utils.TCPPorts...) - if debug { - tcpPorts = append(tcpPorts, utils.DebugPorts...) - } +func GetPortLists(hub bool, debug bool) []types.PortMap { + ports := utils.GetServerPorts(debug) if hub { - tcpPorts = append(tcpPorts, utils.HubXmlrpcPorts...) + ports = append(ports, utils.HubXmlrpcPorts...) } - return tcpPorts, utils.UDPPorts + return ports } // RunPgsqlVersionUpgrade perform a PostgreSQL major upgrade. diff --git a/mgradm/shared/podman/podman.go b/mgradm/shared/podman/podman.go index 73a6d2adc..6c488e3d8 100644 --- a/mgradm/shared/podman/podman.go +++ b/mgradm/shared/podman/podman.go @@ -29,18 +29,9 @@ import ( // GetExposedPorts returns the port exposed. func GetExposedPorts(debug bool) []types.PortMap { - ports := []types.PortMap{ - utils.NewPortMap("https", 443, 443), - utils.NewPortMap("http", 80, 80), - } - ports = append(ports, utils.TCPPorts...) + ports := utils.GetServerPorts(debug) + ports = append(ports, utils.NewPortMap(utils.ServerTCPServiceName, "https", 443, 443)) ports = append(ports, utils.TCPPodmanPorts...) - ports = append(ports, utils.UDPPorts...) - - if debug { - ports = append(ports, utils.DebugPorts...) 
- } - return ports } @@ -56,7 +47,7 @@ func GenerateServerSystemdService(mirrorPath string, debug bool) error { ports := GetExposedPorts(debug) if _, err := exec.LookPath("csp-billing-adapter"); err == nil { - ports = append(ports, utils.NewPortMap("csp-billing", 18888, 18888)) + ports = append(ports, utils.NewPortMap("csp", "csp-billing", 18888, 18888)) args = append(args, "-e ISPAYG=1") } diff --git a/mgrpxy/cmd/install/kubernetes/utils.go b/mgrpxy/cmd/install/kubernetes/utils.go index 3d966ffc4..fe8ce6f0c 100644 --- a/mgrpxy/cmd/install/kubernetes/utils.go +++ b/mgrpxy/cmd/install/kubernetes/utils.go @@ -49,11 +49,14 @@ func installForKubernetes(_ *types.GlobalFlags, // If installing on k3s, install the traefik helm config in manifests isK3s := clusterInfos.IsK3s() IsRke2 := clusterInfos.IsRke2() + ports := shared_utils.GetProxyPorts() if isK3s { - shared_kubernetes.InstallK3sTraefikConfig(shared_utils.ProxyTCPPorts, shared_utils.UDPPorts) + err = shared_kubernetes.InstallK3sTraefikConfig(ports) } else if IsRke2 { - shared_kubernetes.InstallRke2NginxConfig(shared_utils.ProxyTCPPorts, shared_utils.UDPPorts, - flags.Helm.Proxy.Namespace) + err = shared_kubernetes.InstallRke2NginxConfig(ports, flags.Helm.Proxy.Namespace) + } + if err != nil { + return err } helmArgs := []string{"--set", "ingress=" + clusterInfos.Ingress} diff --git a/mgrpxy/shared/podman/podman.go b/mgrpxy/shared/podman/podman.go index d64c39951..56c0f6108 100644 --- a/mgrpxy/shared/podman/podman.go +++ b/mgrpxy/shared/podman/podman.go @@ -53,7 +53,7 @@ func GenerateSystemdService( ports := []types.PortMap{} ports = append(ports, shared_utils.ProxyTCPPorts...) ports = append(ports, shared_utils.ProxyPodmanPorts...) - ports = append(ports, shared_utils.UDPPorts...) + ports = append(ports, shared_utils.TftpPorts...) // Pod dataPod := templates.PodTemplateData{ diff --git a/shared/kubernetes/k3s.go b/shared/kubernetes/k3s.go index 64227ba1f..c9abc82b6 100644 --- a/shared/kubernetes/k3s.go +++ b/shared/kubernetes/k3s.go @@ -5,6 +5,7 @@ package kubernetes import ( + "errors" "fmt" "os" "os/exec" @@ -18,25 +19,58 @@ import ( "github.com/uyuni-project/uyuni-tools/shared/utils" ) -const k3sTraefikConfigPath = "/var/lib/rancher/k3s/server/manifests/k3s-traefik-config.yaml" +const k3sTraefikConfigPath = "/var/lib/rancher/k3s/server/manifests/uyuni-traefik-config.yaml" // InstallK3sTraefikConfig install K3s Traefik configuration. -func InstallK3sTraefikConfig(tcpPorts []types.PortMap, udpPorts []types.PortMap) { +func InstallK3sTraefikConfig(ports []types.PortMap) error { log.Info().Msg(L("Installing K3s Traefik configuration")) + endpoints := []types.PortMap{} + for _, port := range ports { + port.Name = GetTraefikEndpointName(port) + endpoints = append(endpoints, port) + } data := K3sTraefikConfigTemplateData{ - TCPPorts: tcpPorts, - UDPPorts: udpPorts, + Ports: endpoints, } - if err := utils.WriteTemplateToFile(data, k3sTraefikConfigPath, 0600, false); err != nil { - log.Fatal().Err(err).Msgf(L("Failed to write K3s Traefik configuration")) + if err := utils.WriteTemplateToFile(data, k3sTraefikConfigPath, 0600, true); err != nil { + return utils.Errorf(err, L("Failed to write Traefik configuration")) } // Wait for traefik to be back - waitForTraefik() + return waitForTraefik() } -func waitForTraefik() { +// GetTraefikEndpointName computes the traefik endpoint name from the service and port names. +// Those names should be less than 15 characters long. 
+func GetTraefikEndpointName(portmap types.PortMap) string { + svc := shortenName(portmap.Service) + name := shortenName(portmap.Name) + if name != svc { + return fmt.Sprintf("%s-%s", svc, name) + } + return name +} + +func shortenName(name string) string { + shorteningMap := map[string]string{ + "taskomatic": "tasko", + "metrics": "mtrx", + "postgresql": "pgsql", + "exporter": "xport", + "uyuni-tcp": "uyuni", + "uyuni-udp": "uyuni", + "uyuni-proxy-tcp": "uyuni", + "uyuni-proxy-udp": "uyuni", + } + short := shorteningMap[name] + if short == "" { + short = name + } + return short +} + +func waitForTraefik() error { log.Info().Msg(L("Waiting for Traefik to be reloaded")) for i := 0; i < 60; i++ { out, err := utils.RunCmdOutput(zerolog.TraceLevel, "kubectl", "get", "job", "-n", "kube-system", @@ -44,12 +78,12 @@ func waitForTraefik() { if err == nil { completionTime, err := time.Parse(time.RFC3339, string(out)) if err == nil && time.Since(completionTime).Seconds() < 60 { - return + return nil } } time.Sleep(1 * time.Second) } - log.Error().Msg(L("Failed to reload K3s Traefik")) + return errors.New(L("Failed to reload Traefik")) } // UninstallK3sTraefikConfig uninstall K3s Traefik configuration. @@ -62,7 +96,9 @@ func UninstallK3sTraefikConfig(dryRun bool) { log.Error().Err(err).Msg(L("failed to write empty traefik configuration")) } else { // Wait for traefik to be back - waitForTraefik() + if err := waitForTraefik(); err != nil { + log.Error().Err(err).Msg(L("failed to uninstall traefik configuration")) + } } } else { log.Info().Msg(L("Would reinstall Traefik without additionnal configuration")) diff --git a/shared/kubernetes/k3sTraefikTemplate.go b/shared/kubernetes/k3sTraefikTemplate.go index ac94f6acf..931a9960e 100644 --- a/shared/kubernetes/k3sTraefikTemplate.go +++ b/shared/kubernetes/k3sTraefikTemplate.go @@ -19,26 +19,22 @@ metadata: spec: valuesContent: |- ports: -{{- range .TCPPorts }} - {{ .Name }}: - port: {{ .Port }} - expose: true - exposedPort: {{ .Exposed }} - protocol: TCP -{{- end }} -{{- range .UDPPorts }} +{{- range .Ports }} {{ .Name }}: port: {{ .Port }} expose: true exposedPort: {{ .Exposed }} + {{- if eq .Protocol "udp" }} protocol: UDP + {{- else }} + protocol: TCP + {{- end }} {{- end }} ` // K3sTraefikConfigTemplateData represents information used to create K3s Traefik helm chart. type K3sTraefikConfigTemplateData struct { - TCPPorts []types.PortMap - UDPPorts []types.PortMap + Ports []types.PortMap } // Render will create the helm chart configuation for K3sTraefik. diff --git a/shared/kubernetes/k3s_test.go b/shared/kubernetes/k3s_test.go new file mode 100644 index 000000000..a7d0ed1a3 --- /dev/null +++ b/shared/kubernetes/k3s_test.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "testing" + + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +// Test that the generated endpoints are valid for traefik. +func Test_GetTraefikEndpointName(t *testing.T) { + ports := utils.GetServerPorts(true) + ports = append(ports, utils.HubXmlrpcPorts...) + ports = append(ports, utils.GetProxyPorts()...) 
+
+	for _, port := range ports {
+		actual := GetTraefikEndpointName(port)
+		// Traefik would fail if the name is longer than 15 characters
+		if len(actual) > 15 {
+			t.Errorf("Traefik endpoint name has more than 15 characters: %s", actual)
+		}
+	}
+}
diff --git a/shared/kubernetes/rke2.go b/shared/kubernetes/rke2.go
index 9a1993a9a..f43bb323d 100644
--- a/shared/kubernetes/rke2.go
+++ b/shared/kubernetes/rke2.go
@@ -14,19 +14,29 @@ import (
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
 )
 
-const rke2NginxConfigPath = "/var/lib/rancher/rke2/server/manifests/rke2-ingress-nginx-config.yaml"
+const rke2NginxConfigPath = "/var/lib/rancher/rke2/server/manifests/uyuni-ingress-nginx-config.yaml"
 
-// InstallRke2NginxConfig install Rke2 Nginx configuration.
-func InstallRke2NginxConfig(tcpPorts []types.PortMap, udpPorts []types.PortMap, namespace string) {
+// InstallRke2NginxConfig installs the Rke2 Nginx configuration.
+func InstallRke2NginxConfig(ports []types.PortMap, namespace string) error {
 	log.Info().Msg(L("Installing RKE2 Nginx configuration"))
 
+	tcpPorts := []types.PortMap{}
+	udpPorts := []types.PortMap{}
+	for _, port := range ports {
+		if port.Protocol == "udp" {
+			udpPorts = append(udpPorts, port)
+		} else {
+			tcpPorts = append(tcpPorts, port)
+		}
+	}
+
 	data := Rke2NginxConfigTemplateData{
 		Namespace: namespace,
 		TCPPorts:  tcpPorts,
 		UDPPorts:  udpPorts,
 	}
-	if err := utils.WriteTemplateToFile(data, rke2NginxConfigPath, 0600, false); err != nil {
-		log.Fatal().Err(err).Msgf(L("Failed to write Rke2 nginx configuration"))
+	if err := utils.WriteTemplateToFile(data, rke2NginxConfigPath, 0600, true); err != nil {
+		return utils.Errorf(err, L("Failed to write Rke2 nginx configuration"))
 	}
 
 	// Wait for the nginx controller to be back
@@ -40,6 +50,7 @@ func InstallRke2NginxConfig(tcpPorts []types.PortMap, udpPorts []types.PortMap,
 			}
 		}
 	}
+	return nil
 }
 
 // UninstallRke2NginxConfig uninstall Rke2 Nginx configuration.
diff --git a/shared/types/networks.go b/shared/types/networks.go
index 30e895e81..b94f18ab6 100644
--- a/shared/types/networks.go
+++ b/shared/types/networks.go
@@ -6,6 +6,7 @@ package types
 
 // PortMap describes a port.
 type PortMap struct {
+	Service string
 	Name    string
 	Exposed int
 	Port    int
diff --git a/shared/utils/ports.go b/shared/utils/ports.go
index 6cd7e7402..6adc0b5d3 100644
--- a/shared/utils/ports.go
+++ b/shared/utils/ports.go
@@ -6,45 +6,72 @@ package utils
 
 import "github.com/uyuni-project/uyuni-tools/shared/types"
 
+// ServerTCPServiceName is the name of the server TCP service.
+const ServerTCPServiceName = "uyuni-tcp"
+
+// ServerUDPServiceName is the name of the server UDP service.
+const ServerUDPServiceName = "uyuni-udp"
+
+// ProxyTCPServiceName is the name of the proxy TCP service.
+const ProxyTCPServiceName = "uyuni-proxy-tcp"
+
+// ProxyUDPServiceName is the name of the proxy UDP service.
+const ProxyUDPServiceName = "uyuni-proxy-udp"
+
 // NewPortMap is a constructor for PortMap type.
-func NewPortMap(name string, exposed int, port int) types.PortMap {
+func NewPortMap(service string, name string, exposed int, port int) types.PortMap {
 	return types.PortMap{
+		Service: service,
 		Name:    name,
 		Exposed: exposed,
 		Port:    port,
 	}
 }
 
-// TCPPorts are the tcp ports required by the server
-// The port names should be less than 15 characters long and lowercased for traefik to eat them.
-var TCPPorts = []types.PortMap{ - NewPortMap("postgres", 5432, 5432), - NewPortMap("salt-publish", 4505, 4505), - NewPortMap("salt-request", 4506, 4506), - NewPortMap("cobbler", 25151, 25151), - NewPortMap("psql-mtrx", 9187, 9187), - NewPortMap("tasko-jmx-mtrx", 5556, 5556), - NewPortMap("tomcat-jmx-mtrx", 5557, 5557), - NewPortMap("tasko-mtrx", 9800, 9800), +// WebPorts is the list of ports for the server web service. +var WebPorts = []types.PortMap{ + NewPortMap(ServerTCPServiceName, "http", 80, 80), } -// TCPPodmanPorts are the tcp ports required by the server on podman. -var TCPPodmanPorts = []types.PortMap{ - // TODO: Replace Node exporter with cAdvisor - NewPortMap("node-exporter", 9100, 9100), +// PgsqlPorts is the list of ports for the server report db service. +var PgsqlPorts = []types.PortMap{ + NewPortMap(ServerTCPServiceName, "pgsql", 5432, 5432), + NewPortMap(ServerTCPServiceName, "exporter", 9187, 9187), +} + +// SaltPorts is the list of ports for the server salt service. +var SaltPorts = []types.PortMap{ + NewPortMap(ServerTCPServiceName, "publish", 4505, 4505), + NewPortMap(ServerTCPServiceName, "request", 4506, 4506), } -// DebugPorts are the port used by dev for debugging applications. -var DebugPorts = []types.PortMap{ - // We can't expose on port 8000 since traefik already uses it - NewPortMap("tomcat-debug", 8003, 8003), - NewPortMap("tasko-debug", 8001, 8001), - NewPortMap("search-debug", 8002, 8002), +// CobblerPorts is the list of ports for the server cobbler service. +var CobblerPorts = []types.PortMap{ + NewPortMap(ServerTCPServiceName, "cobbler", 25151, 25151), } -// UDPPorts are the udp ports required by the server. -var UDPPorts = []types.PortMap{ +// TaskoPorts is the list of ports for the server taskomatic service. +var TaskoPorts = []types.PortMap{ + NewPortMap(ServerTCPServiceName, "jmx", 5556, 5556), + NewPortMap(ServerTCPServiceName, "mtrx", 9800, 9800), + NewPortMap(ServerTCPServiceName, "debug", 8001, 8001), +} + +// TomcatPorts is the list of ports for the server tomcat service. +var TomcatPorts = []types.PortMap{ + NewPortMap(ServerTCPServiceName, "jmx", 5557, 5557), + NewPortMap(ServerTCPServiceName, "debug", 8003, 8003), +} + +// SearchPorts is the list of ports for the server search service. +var SearchPorts = []types.PortMap{ + NewPortMap(ServerTCPServiceName, "debug", 8002, 8002), +} + +// TftpPorts is the list of ports for the server tftp service. +var TftpPorts = []types.PortMap{ { + Service: ServerUDPServiceName, Name: "tftp", Exposed: 69, Port: 69, @@ -52,20 +79,67 @@ var UDPPorts = []types.PortMap{ }, } +// GetServerPorts returns all the server container ports. +// +// if debug is set to true, the debug ports are added to the list. +func GetServerPorts(debug bool) []types.PortMap { + ports := []types.PortMap{} + ports = appendPorts(ports, debug, WebPorts...) + ports = appendPorts(ports, debug, PgsqlPorts...) + ports = appendPorts(ports, debug, SaltPorts...) + ports = appendPorts(ports, debug, CobblerPorts...) + ports = appendPorts(ports, debug, TaskoPorts...) + ports = appendPorts(ports, debug, TomcatPorts...) + ports = appendPorts(ports, debug, SearchPorts...) + ports = appendPorts(ports, debug, TftpPorts...) 
+
+	return ports
+}
+
+func appendPorts(ports []types.PortMap, debug bool, newPorts ...types.PortMap) []types.PortMap {
+	for _, newPort := range newPorts {
+		if debug || newPort.Name != "debug" {
+			ports = append(ports, newPort)
+		}
+	}
+	return ports
+}
+
+// TCPPodmanPorts are the tcp ports required by the server on podman.
+var TCPPodmanPorts = []types.PortMap{
+	// TODO: Replace Node exporter with cAdvisor
+	NewPortMap("tomcat", "node-exporter", 9100, 9100),
+}
+
 // HubXmlrpcPorts are the tcp ports required by the Hub XMLRPC API service.
 var HubXmlrpcPorts = []types.PortMap{
-	NewPortMap("hub-xmlrpc", 2830, 2830),
+	NewPortMap(ServerTCPServiceName, "xmlrpc", 2830, 2830),
 }
 
 // ProxyTCPPorts are the tcp ports required by the proxy.
 var ProxyTCPPorts = []types.PortMap{
-	NewPortMap("ssh", 8022, 22),
-	NewPortMap("salt-publish", 4505, 4505),
-	NewPortMap("salt-request", 4506, 4506),
+	NewPortMap(ProxyTCPServiceName, "ssh", 8022, 22),
+	NewPortMap(ProxyTCPServiceName, "publish", 4505, 4505),
+	NewPortMap(ProxyTCPServiceName, "request", 4506, 4506),
 }
 
 // ProxyPodmanPorts are the http/s ports required by the proxy.
 var ProxyPodmanPorts = []types.PortMap{
-	NewPortMap("https", 443, 443),
-	NewPortMap("http", 80, 80),
+	NewPortMap(ProxyTCPServiceName, "https", 443, 443),
+	NewPortMap(ProxyTCPServiceName, "http", 80, 80),
+}
+
+// GetProxyPorts returns all the proxy container ports.
+func GetProxyPorts() []types.PortMap {
+	ports := []types.PortMap{}
+	ports = appendPorts(ports, false, ProxyTCPPorts...)
+	ports = appendPorts(ports, false, types.PortMap{
+		Service:  ProxyUDPServiceName,
+		Name:     "tftp",
+		Exposed:  69,
+		Port:     69,
+		Protocol: "udp",
+	})
+
+	return ports
 }
diff --git a/shared/utils/ports_test.go b/shared/utils/ports_test.go
new file mode 100644
index 000000000..fec5fa807
--- /dev/null
+++ b/shared/utils/ports_test.go
@@ -0,0 +1,22 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package utils
+
+import (
+	"testing"
+
+	"github.com/uyuni-project/uyuni-tools/shared/testutils"
+)
+
+func TestGetServerPorts(t *testing.T) {
+	allPorts := len(WebPorts) + len(PgsqlPorts) + len(SaltPorts) + len(CobblerPorts) +
+		len(TaskoPorts) + len(TomcatPorts) + len(SearchPorts) + len(TftpPorts)
+
+	ports := GetServerPorts(false)
+	testutils.AssertEquals(t, "Wrong number of ports without debug ones", allPorts-3, len(ports))
+
+	ports = GetServerPorts(true)
+	testutils.AssertEquals(t, "Wrong number of ports with debug ones", allPorts, len(ports))
+}

From 7c9c3b56038962db80d8d3d642bb865eceaa45f5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Wed, 23 Oct 2024 13:32:04 +0200
Subject: [PATCH 08/19] Refactor the upgrade SanityCheck to not exec in a running container

Executing commands in a running container only works when such a
container actually exists, and it is hard to unit test. To help share
code with Kubernetes, SanityCheck now gets the version of the existing
deployment by inspecting its image. This also makes it easier to cover
those checks with unit tests.
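
As an illustration, the check can now be exercised on plain inspect
data with no container around, mirroring the style of the new unit
tests in this patch. A minimal sketch; the release numbers and image
reference below are made up:

    running := &utils.ServerInspectData{UyuniRelease: "2024.07"}
    target := &utils.ServerInspectData{
        CommonInspectData: utils.CommonInspectData{
            CurrentPgVersion: "16",
            ImagePgVersion:   "17",
        },
        UyuniRelease: "2024.13",
    }
    // Compares releases and PostgreSQL versions using only inspected data.
    err := SanityCheck(running, target, "registry.example.org/uyuni/server:2024.13")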
--- mgradm/shared/kubernetes/deployment.go | 34 ++++++ mgradm/shared/kubernetes/deployment_test.go | 36 ++++++ mgradm/shared/kubernetes/install.go | 12 +- mgradm/shared/podman/podman.go | 11 +- mgradm/shared/utils/exec.go | 115 +++++++++----------- mgradm/shared/utils/exec_test.go | 82 ++++++++++++++ shared/utils/utils_test.go | 6 + 7 files changed, 227 insertions(+), 69 deletions(-) create mode 100644 mgradm/shared/kubernetes/deployment.go create mode 100644 mgradm/shared/kubernetes/deployment_test.go create mode 100644 mgradm/shared/utils/exec_test.go diff --git a/mgradm/shared/kubernetes/deployment.go b/mgradm/shared/kubernetes/deployment.go new file mode 100644 index 000000000..9347fb3ae --- /dev/null +++ b/mgradm/shared/kubernetes/deployment.go @@ -0,0 +1,34 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "strings" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +// ServerDeployName is the name of the server deployment. +const ServerDeployName = "uyuni" + +var runCmdOutput = utils.RunCmdOutput + +// getRunningServerImage extracts the main server container image from a running deployment. +func getRunningServerImage(namespace string) string { + out, err := runCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "deploy", "-n", namespace, ServerDeployName, + "-o", "jsonpath={.spec.template.spec.containers[0].image}", + ) + if err != nil { + // Errors could be that the namespace or deployment doesn't exist, just return no image. + log.Debug().Err(err).Msg("failed to get the running server container image") + return "" + } + return strings.TrimSpace(string(out)) +} diff --git a/mgradm/shared/kubernetes/deployment_test.go b/mgradm/shared/kubernetes/deployment_test.go new file mode 100644 index 000000000..7df05cabd --- /dev/null +++ b/mgradm/shared/kubernetes/deployment_test.go @@ -0,0 +1,36 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "errors" + "fmt" + "testing" + + "github.com/rs/zerolog" + "github.com/uyuni-project/uyuni-tools/shared/testutils" +) + +func TestGetRunningServerImage(t *testing.T) { + type dataType struct { + err error + out string + expected string + } + data := []dataType{ + {nil, "registry.opensuse.org/uyuni/server:latest\n", "registry.opensuse.org/uyuni/server:latest"}, + {errors.New("deployment not found"), "", ""}, + } + + for i, test := range data { + runCmdOutput = func(logLevel zerolog.Level, command string, args ...string) ([]byte, error) { + return []byte(test.out), test.err + } + actual := getRunningServerImage("myns") + testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i), test.expected, actual) + } +} diff --git a/mgradm/shared/kubernetes/install.go b/mgradm/shared/kubernetes/install.go index c39a613df..e08820d7e 100644 --- a/mgradm/shared/kubernetes/install.go +++ b/mgradm/shared/kubernetes/install.go @@ -2,6 +2,8 @@ // // SPDX-License-Identifier: Apache-2.0 +//go:build !nok8s + package kubernetes import ( @@ -170,7 +172,15 @@ func Upgrade( return utils.Errorf(err, L("cannot inspect kubernetes values")) } - err = cmd_utils.SanityCheck(cnx, inspectedValues, serverImage) + var runningData *utils.ServerInspectData + if runningImage := getRunningServerImage(namespace); runningImage != "" { + runningData, err = kubernetes.InspectKubernetes(namespace, runningImage, "Never") + if err != 
nil { + return err + } + } + + err = cmd_utils.SanityCheck(runningData, inspectedValues, serverImage) if err != nil { return err } diff --git a/mgradm/shared/podman/podman.go b/mgradm/shared/podman/podman.go index 6c488e3d8..213aac5ca 100644 --- a/mgradm/shared/podman/podman.go +++ b/mgradm/shared/podman/podman.go @@ -374,9 +374,16 @@ func Upgrade( return utils.Errorf(err, L("cannot inspect podman values")) } - cnx := shared.NewConnection("podman", podman.ServerContainerName, "") + runningImage := podman.GetServiceImage(podman.ServerService) + var runningData *utils.ServerInspectData + if runningImage != "" { + runningData, err = Inspect(runningImage) + if err != nil { + return err + } + } - if err := adm_utils.SanityCheck(cnx, inspectedValues, preparedImage); err != nil { + if err := adm_utils.SanityCheck(runningData, inspectedValues, preparedImage); err != nil { return err } diff --git a/mgradm/shared/utils/exec.go b/mgradm/shared/utils/exec.go index d4cd9878b..55fb5b96c 100644 --- a/mgradm/shared/utils/exec.go +++ b/mgradm/shared/utils/exec.go @@ -137,86 +137,69 @@ func GenerateMigrationScript(sourceFqdn string, user string, kubernetes bool, pr } // SanityCheck verifies if an upgrade can be run. -func SanityCheck(cnx *shared.Connection, inspectedValues *utils.ServerInspectData, serverImage string) error { - isUyuni, err := isUyuni(cnx) - if err != nil { - return utils.Errorf(err, L("cannot check server release")) - } - isUyuniImage := inspectedValues.UyuniRelease != "" - isSumaImage := inspectedValues.SuseManagerRelease != "" - - if isUyuni && isSumaImage { - return fmt.Errorf( - L("currently SUSE Manager %s is installed, instead the image is Uyuni. Upgrade is not supported"), - inspectedValues.SuseManagerRelease, - ) - } - - if !isUyuni && isUyuniImage { - return fmt.Errorf( - L("currently Uyuni %s is installed, instead the image is SUSE Manager. Upgrade is not supported"), - inspectedValues.UyuniRelease, - ) - } - - if isUyuni { - cnxArgs := []string{"s/Uyuni release //g", "/etc/uyuni-release"} - currentUyuniRelease, err := cnx.Exec("sed", cnxArgs...) - if err != nil { - return utils.Errorf(err, L("failed to read current uyuni release")) - } - log.Debug().Msgf("Current release is %s", string(currentUyuniRelease)) - if !isUyuniImage { - return fmt.Errorf(L("cannot fetch release from image %s"), serverImage) - } - log.Debug().Msgf("Image %s is %s", serverImage, inspectedValues.UyuniRelease) - if utils.CompareVersion(inspectedValues.UyuniRelease, string(currentUyuniRelease)) < 0 { +func SanityCheck( + runningValues *utils.ServerInspectData, + inspectedValues *utils.ServerInspectData, + serverImage string, +) error { + // Skip the uyuni / SUSE Manager release checks if the runningValues is nil. + if runningValues != nil { + isUyuni := runningValues.UyuniRelease != "" + isUyuniImage := inspectedValues.UyuniRelease != "" + isSumaImage := inspectedValues.SuseManagerRelease != "" + + if isUyuni && isSumaImage { return fmt.Errorf( - L("cannot downgrade from version %[1]s to %[2]s"), - string(currentUyuniRelease), inspectedValues.UyuniRelease, + L("currently SUSE Manager %s is installed, instead the image is Uyuni. 
Upgrade is not supported"), + inspectedValues.SuseManagerRelease, ) } - } else { - bCurrentSuseManagerRelease, err := cnx.Exec("sed", "s/.*(\\([0-9.]*\\)).*/\\1/g", "/etc/susemanager-release") - currentSuseManagerRelease := strings.TrimSuffix(string(bCurrentSuseManagerRelease), "\n") - if err != nil { - return utils.Errorf(err, L("failed to read current susemanager release")) - } - log.Debug().Msgf("Current release is %s", currentSuseManagerRelease) - if !isSumaImage { - return fmt.Errorf(L("cannot fetch release from image %s"), serverImage) - } - log.Debug().Msgf("Image %s is %s", serverImage, inspectedValues.SuseManagerRelease) - if utils.CompareVersion(inspectedValues.SuseManagerRelease, currentSuseManagerRelease) < 0 { + + if !isUyuni && isUyuniImage { return fmt.Errorf( - L("cannot downgrade from version %[1]s to %[2]s"), - currentSuseManagerRelease, inspectedValues.SuseManagerRelease, + L("currently Uyuni %s is installed, instead the image is SUSE Manager. Upgrade is not supported"), + inspectedValues.UyuniRelease, ) } + + if isUyuni { + currentUyuniRelease := runningValues.UyuniRelease + log.Debug().Msgf("Current release is %s", string(currentUyuniRelease)) + if !isUyuniImage { + return fmt.Errorf(L("cannot fetch release from image %s"), serverImage) + } + log.Debug().Msgf("Image %s is %s", serverImage, inspectedValues.UyuniRelease) + if utils.CompareVersion(inspectedValues.UyuniRelease, string(currentUyuniRelease)) < 0 { + return fmt.Errorf( + L("cannot downgrade from version %[1]s to %[2]s"), + string(currentUyuniRelease), inspectedValues.UyuniRelease, + ) + } + } else { + currentSuseManagerRelease := runningValues.SuseManagerRelease + log.Debug().Msgf("Current release is %s", currentSuseManagerRelease) + if !isSumaImage { + return fmt.Errorf(L("cannot fetch release from image %s"), serverImage) + } + log.Debug().Msgf("Image %s is %s", serverImage, inspectedValues.SuseManagerRelease) + if utils.CompareVersion(inspectedValues.SuseManagerRelease, currentSuseManagerRelease) < 0 { + return fmt.Errorf( + L("cannot downgrade from version %[1]s to %[2]s"), + currentSuseManagerRelease, inspectedValues.SuseManagerRelease, + ) + } + } } + // Perform PostgreSQL version checks. if inspectedValues.ImagePgVersion == "" { - return fmt.Errorf(L("cannot fetch postgresql version from %s"), serverImage) + return fmt.Errorf(L("cannot fetch PostgreSQL version from %s"), serverImage) } log.Debug().Msgf("Image %s has PostgreSQL %s", serverImage, inspectedValues.ImagePgVersion) if inspectedValues.CurrentPgVersion == "" { - return errors.New(L("posgresql is not installed in the current deployment")) + return errors.New(L("PostgreSQL is not installed in the current deployment")) } log.Debug().Msgf("Current deployment has PostgreSQL %s", inspectedValues.CurrentPgVersion) return nil } - -func isUyuni(cnx *shared.Connection) (bool, error) { - cnxArgs := []string{"/etc/uyuni-release"} - _, err := cnx.Exec("cat", cnxArgs...) - if err != nil { - cnxArgs := []string{"/etc/susemanager-release"} - _, err := cnx.Exec("cat", cnxArgs...) 
- if err != nil { - return false, errors.New(L("cannot find either /etc/uyuni-release or /etc/susemanagere-release")) - } - return false, nil - } - return true, nil -} diff --git a/mgradm/shared/utils/exec_test.go b/mgradm/shared/utils/exec_test.go new file mode 100644 index 000000000..b54f431d6 --- /dev/null +++ b/mgradm/shared/utils/exec_test.go @@ -0,0 +1,82 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package utils + +import ( + "fmt" + "strings" + "testing" + + "github.com/uyuni-project/uyuni-tools/shared/testutils" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +func TestSanityCheck(t *testing.T) { + type dataType struct { + oldUyuniRelease string + oldSumaRelease string + oldPgsqlVersion string + newUyuniRelease string + newSumaRelease string + newPgsqlVersion string + errorPart string + } + data := []dataType{ + {"2024.07", "", "16", "2024.13", "", "17", ""}, + {"", "5.0.1", "16", "", "5.1.0", "17", ""}, + { + "2024.13", "", "17", "2024.07", "", "16", + "cannot downgrade", + }, + { + "", "5.1.0", "17", "", "5.0.1", "16", + "cannot downgrade", + }, + { + "2024.07", "", "16", "", "5.1.0", "17", + "Upgrade is not supported", + }, + { + "", "5.1.0", "17", "2024.07", "", "16", + "Upgrade is not supported", + }, + { + "2024.07", "", "16", "2024.13", "", "", + "cannot fetch PostgreSQL", + }, + { + "2024.07", "", "", "2024.13", "", "17", + "PostgreSQL is not installed", + }, + } + + for i, test := range data { + runningValues := utils.ServerInspectData{ + UyuniRelease: test.oldUyuniRelease, + SuseManagerRelease: test.oldSumaRelease, + } + newValues := utils.ServerInspectData{ + CommonInspectData: utils.CommonInspectData{ + CurrentPgVersion: test.oldPgsqlVersion, + ImagePgVersion: test.newPgsqlVersion, + }, + UyuniRelease: test.newUyuniRelease, + SuseManagerRelease: test.newSumaRelease, + } + err := SanityCheck(&runningValues, &newValues, "path/to/image") + if test.errorPart != "" { + if err != nil { + testutils.AssertTrue( + t, fmt.Sprintf("test %d: Unexpected error message: %s", i+1, err), + strings.Contains(err.Error(), test.errorPart), + ) + } else { + t.Errorf("test %d: expected an error, got none", i+1) + } + } else { + testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected error", i+1), nil, err) + } + } +} diff --git a/shared/utils/utils_test.go b/shared/utils/utils_test.go index bc21ece86..b7707275c 100644 --- a/shared/utils/utils_test.go +++ b/shared/utils/utils_test.go @@ -387,3 +387,9 @@ func TestSaveBinaryData(t *testing.T) { fmt.Sprintf("%v", data), fmt.Sprintf("%v", storedData), ) } + +func TestCompareVersion(t *testing.T) { + testutils.AssertTrue(t, "2024.07 is not inferior to 2024.13", CompareVersion("2024.07", "2024.13") < 0) + testutils.AssertTrue(t, "2024.13 is not superior to 2024.07", CompareVersion("2024.13", "2024.07") > 0) + testutils.AssertTrue(t, "2024.13 is not equal to 2024.13", CompareVersion("2024.13", "2024.13") == 0) +} From ced59b592d5786abc9b9b2523fa46f9a05538779 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Wed, 2 Oct 2024 16:22:44 +0200 Subject: [PATCH 09/19] Use one data structure for install, migrate, upgrade flags In order to later share code between those 3 very similar commands, we need to share the parameters data structure. 
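
As a sketch of the resulting layout, each backend command now only
wraps the shared structure with its backend-specific flags, and the
previously flat fields are reached through the nested groups (types as
introduced by this patch):

    type podmanInstallFlags struct {
        adm_utils.ServerFlags `mapstructure:",squash"`
        Podman                podman.PodmanFlags
    }

    tz := flags.Installation.TZ  // previously flags.TZ
    user := flags.Migration.User // previously flags.User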
--- mgradm/cmd/install/kubernetes/kubernetes.go | 14 +- .../cmd/install/kubernetes/kubernetes_test.go | 5 +- mgradm/cmd/install/kubernetes/utils.go | 23 +-- mgradm/cmd/install/podman/podman.go | 9 +- mgradm/cmd/install/podman/podman_test.go | 6 +- mgradm/cmd/install/podman/utils.go | 38 ++--- mgradm/cmd/install/shared/flags.go | 101 ------------- mgradm/cmd/install/shared/shared.go | 32 ++-- mgradm/cmd/migrate/kubernetes/kubernetes.go | 16 +- .../cmd/migrate/kubernetes/kubernetes_test.go | 11 +- mgradm/cmd/migrate/kubernetes/utils.go | 17 ++- mgradm/cmd/migrate/podman/podman.go | 10 +- mgradm/cmd/migrate/podman/podman_test.go | 10 +- mgradm/cmd/migrate/podman/utils.go | 7 +- mgradm/cmd/migrate/shared/flags.go | 13 -- mgradm/cmd/upgrade/kubernetes/kubernetes.go | 14 +- .../cmd/upgrade/kubernetes/kubernetes_test.go | 3 +- mgradm/cmd/upgrade/kubernetes/utils.go | 4 +- mgradm/cmd/upgrade/podman/podman.go | 10 +- mgradm/cmd/upgrade/podman/podman_test.go | 6 +- mgradm/cmd/upgrade/podman/utils.go | 2 +- mgradm/cmd/upgrade/shared/flags.go | 9 -- mgradm/shared/kubernetes/deployment_test.go | 2 +- mgradm/shared/kubernetes/flags.go | 17 +++ mgradm/shared/utils/flags.go | 138 ++++++++++++++++++ .../shared => shared/utils}/flags_test.go | 2 +- shared/testutils/flagstests/mgradm_install.go | 73 ++++----- 27 files changed, 322 insertions(+), 270 deletions(-) create mode 100644 mgradm/shared/kubernetes/flags.go create mode 100644 mgradm/shared/utils/flags.go rename mgradm/{cmd/install/shared => shared/utils}/flags_test.go (98%) diff --git a/mgradm/cmd/install/kubernetes/kubernetes.go b/mgradm/cmd/install/kubernetes/kubernetes.go index dd3bca7a5..34ce80810 100644 --- a/mgradm/cmd/install/kubernetes/kubernetes.go +++ b/mgradm/cmd/install/kubernetes/kubernetes.go @@ -10,18 +10,14 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) -type kubernetesInstallFlags struct { - shared.InstallFlags `mapstructure:",squash"` - Helm cmd_utils.HelmFlags -} - -func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetesInstallFlags]) *cobra.Command { +func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command { cmd := &cobra.Command{ Use: "kubernetes [fqdn]", Short: L("Install a new server on a kubernetes cluster"), @@ -37,10 +33,10 @@ NOTE: installing on a remote cluster is not supported yet! 
`), Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - var flags kubernetesInstallFlags + var flags kubernetes.KubernetesServerFlags flagsUpdater := func(v *viper.Viper) { - flags.InstallFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.InstallFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/install/kubernetes/kubernetes_test.go b/mgradm/cmd/install/kubernetes/kubernetes_test.go index 84139cba1..69fdee454 100644 --- a/mgradm/cmd/install/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/install/kubernetes/kubernetes_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/spf13/cobra" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" "github.com/uyuni-project/uyuni-tools/shared/testutils" "github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -21,10 +22,10 @@ func TestParamsParsing(t *testing.T) { args = append(args, "srv.fq.dn") // Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *kubernetesInstallFlags, + tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, _ *cobra.Command, args []string, ) error { - flagstests.AssertInstallFlags(t, &flags.InstallFlags) + flagstests.AssertInstallFlags(t, &flags.ServerFlags) flagstests.AssertServerHelmFlags(t, &flags.Helm) testutils.AssertEquals(t, "Wrong FQDN", "srv.fq.dn", args[0]) return nil diff --git a/mgradm/cmd/install/kubernetes/utils.go b/mgradm/cmd/install/kubernetes/utils.go index 8a2119ba0..36fcc5e94 100644 --- a/mgradm/cmd/install/kubernetes/utils.go +++ b/mgradm/cmd/install/kubernetes/utils.go @@ -23,8 +23,9 @@ import ( shared_utils "github.com/uyuni-project/uyuni-tools/shared/utils" ) -func installForKubernetes(_ *types.GlobalFlags, - flags *kubernetesInstallFlags, +func installForKubernetes( + _ *types.GlobalFlags, + flags *kubernetes.KubernetesServerFlags, cmd *cobra.Command, args []string, ) error { @@ -34,7 +35,7 @@ func installForKubernetes(_ *types.GlobalFlags, } } - flags.CheckParameters(cmd, "kubectl") + flags.Installation.CheckParameters(cmd, "kubectl") cnx := shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) fqdn := args[0] @@ -43,12 +44,12 @@ func installForKubernetes(_ *types.GlobalFlags, return err } - helmArgs := []string{"--set", "timezone=" + flags.TZ} + helmArgs := []string{"--set", "timezone=" + flags.Installation.TZ} if flags.Mirror != "" { // TODO Handle claims for multi-node clusters helmArgs = append(helmArgs, "--set", "mirror.hostPath="+flags.Mirror) } - if flags.Debug.Java { + if flags.Installation.Debug.Java { helmArgs = append(helmArgs, "--set", "exposeJavaDebug=true") } @@ -59,13 +60,13 @@ func installForKubernetes(_ *types.GlobalFlags, } // Deploy the SSL CA or server certificate - if flags.SSL.UseExisting() { - if err := kubernetes.DeployExistingCertificate(&flags.Helm, &flags.SSL); err != nil { + if flags.Installation.SSL.UseExisting() { + if err := kubernetes.DeployExistingCertificate(&flags.Helm, &flags.Installation.SSL); err != nil { return err } } else { sslArgs, err := kubernetes.DeployCertificate( - &flags.Helm, &flags.SSL, clusterInfos.GetKubeconfig(), fqdn, + &flags.Helm, &flags.Installation.SSL, clusterInfos.GetKubeconfig(), fqdn, 
flags.Image.PullPolicy, ) @@ -76,7 +77,7 @@ func installForKubernetes(_ *types.GlobalFlags, } // Create a secret using SCC credentials if any are provided - helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Uyuni.Namespace, &flags.SCC) + helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Uyuni.Namespace, &flags.Installation.SCC) if err != nil { return err } @@ -84,7 +85,7 @@ func installForKubernetes(_ *types.GlobalFlags, // Deploy Uyuni and wait for it to be up if err := kubernetes.Deploy( cnx, flags.Image.Registry, &flags.Image, &flags.HubXmlrpc, &flags.Helm, - clusterInfos, fqdn, flags.Debug.Java, false, helmArgs..., + clusterInfos, fqdn, flags.Installation.Debug.Java, false, helmArgs..., ); err != nil { return shared_utils.Errorf(err, L("cannot deploy uyuni")) } @@ -94,7 +95,7 @@ func installForKubernetes(_ *types.GlobalFlags, "NO_SSL": "Y", } - if err := install_shared.RunSetup(cnx, &flags.InstallFlags, args[0], envs); err != nil { + if err := install_shared.RunSetup(cnx, &flags.ServerFlags, args[0], envs); err != nil { namespace, err := cnx.GetNamespace("") if err != nil { return shared_utils.Errorf(err, L("failed to stop service")) diff --git a/mgradm/cmd/install/podman/podman.go b/mgradm/cmd/install/podman/podman.go index 3e5b13036..5c11b5b83 100644 --- a/mgradm/cmd/install/podman/podman.go +++ b/mgradm/cmd/install/podman/podman.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared" + adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/podman" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -15,8 +16,8 @@ import ( ) type podmanInstallFlags struct { - shared.InstallFlags `mapstructure:",squash"` - Podman podman.PodmanFlags + adm_utils.ServerFlags `mapstructure:",squash"` + Podman podman.PodmanFlags } func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanInstallFlags]) *cobra.Command { @@ -33,8 +34,8 @@ NOTE: installing on a remote podman is not supported yet! 
RunE: func(cmd *cobra.Command, args []string) error { var flags podmanInstallFlags flagsUpdater := func(v *viper.Viper) { - flags.InstallFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.InstallFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/install/podman/podman_test.go b/mgradm/cmd/install/podman/podman_test.go index fff498ca1..a37b790bf 100644 --- a/mgradm/cmd/install/podman/podman_test.go +++ b/mgradm/cmd/install/podman/podman_test.go @@ -21,8 +21,10 @@ func TestParamsParsing(t *testing.T) { args = append(args, "srv.fq.dn") // Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *podmanInstallFlags, _ *cobra.Command, args []string) error { - flagstests.AssertInstallFlags(t, &flags.InstallFlags) + tester := func(_ *types.GlobalFlags, flags *podmanInstallFlags, + _ *cobra.Command, args []string, + ) error { + flagstests.AssertInstallFlags(t, &flags.ServerFlags) flagstests.AssertPodmanInstallFlags(t, &flags.Podman) testutils.AssertEquals(t, "Wrong FQDN", "srv.fq.dn", args[0]) return nil diff --git a/mgradm/cmd/install/podman/utils.go b/mgradm/cmd/install/podman/utils.go index fcf67769d..8801a380f 100644 --- a/mgradm/cmd/install/podman/utils.go +++ b/mgradm/cmd/install/podman/utils.go @@ -29,7 +29,9 @@ func waitForSystemStart( image string, flags *podmanInstallFlags, ) error { - err := podman.GenerateSystemdService(systemd, flags.TZ, image, flags.Debug.Java, flags.Mirror, flags.Podman.Args) + err := podman.GenerateSystemdService( + systemd, flags.Installation.TZ, image, flags.Installation.Debug.Java, flags.Mirror, flags.Podman.Args, + ) if err != nil { return err } @@ -55,7 +57,7 @@ func installForPodman( return err } - authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.SCC) + authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.Installation.SCC) if err != nil { return utils.Errorf(err, L("failed to login to registry.suse.com")) } @@ -67,7 +69,7 @@ func installForPodman( ) } - flags.CheckParameters(cmd, "podman") + flags.Installation.CheckParameters(cmd, "podman") if _, err := exec.LookPath("podman"); err != nil { return errors.New(L("install podman before running this command")) } @@ -93,26 +95,26 @@ func installForPodman( return utils.Errorf(err, L("cannot wait for system start")) } - caPassword := flags.SSL.Password - if flags.SSL.UseExisting() { + caPassword := flags.Installation.SSL.Password + if flags.Installation.SSL.UseExisting() { // We need to have a password for the generated CA, even though it will be thrown away after install caPassword = "dummy" } env := map[string]string{ - "CERT_O": flags.SSL.Org, - "CERT_OU": flags.SSL.OU, - "CERT_CITY": flags.SSL.City, - "CERT_STATE": flags.SSL.State, - "CERT_COUNTRY": flags.SSL.Country, - "CERT_EMAIL": flags.SSL.Email, - "CERT_CNAMES": strings.Join(append([]string{fqdn}, flags.SSL.Cnames...), ","), + "CERT_O": flags.Installation.SSL.Org, + "CERT_OU": flags.Installation.SSL.OU, + "CERT_CITY": flags.Installation.SSL.City, + "CERT_STATE": flags.Installation.SSL.State, + "CERT_COUNTRY": flags.Installation.SSL.Country, + "CERT_EMAIL": flags.Installation.SSL.Email, + "CERT_CNAMES": strings.Join(append([]string{fqdn}, flags.Installation.SSL.Cnames...), ","), "CERT_PASS": caPassword, } log.Info().Msg(L("Run setup 
command in the container")) - if err := install_shared.RunSetup(cnx, &flags.InstallFlags, fqdn, env); err != nil { + if err := install_shared.RunSetup(cnx, &flags.ServerFlags, fqdn, env); err != nil { if stopErr := systemd.StopService(shared_podman.ServerService); stopErr != nil { log.Error().Msgf(L("Failed to stop service: %v"), stopErr) } @@ -129,12 +131,12 @@ func installForPodman( if flags.Coco.Replicas > 0 { // This may need to be moved up later once more containers require DB access - if err := shared_podman.CreateDBSecrets(flags.DB.User, flags.DB.Password); err != nil { + if err := shared_podman.CreateDBSecrets(flags.Installation.DB.User, flags.Installation.DB.Password); err != nil { return err } if err := coco.SetupCocoContainer( systemd, authFile, flags.Image.Registry, flags.Coco, flags.Image, - flags.DB.Name, flags.DB.Port, + flags.Installation.DB.Name, flags.Installation.DB.Port, ); err != nil { return err } @@ -148,8 +150,10 @@ func installForPodman( } } - if flags.SSL.UseExisting() { - if err := podman.UpdateSSLCertificate(cnx, &flags.SSL.Ca, &flags.SSL.Server); err != nil { + if flags.Installation.SSL.UseExisting() { + if err := podman.UpdateSSLCertificate( + cnx, &flags.Installation.SSL.Ca, &flags.Installation.SSL.Server, + ); err != nil { return utils.Errorf(err, L("cannot update SSL certificate")) } } diff --git a/mgradm/cmd/install/shared/flags.go b/mgradm/cmd/install/shared/flags.go index 50d7534eb..0c01fc73c 100644 --- a/mgradm/cmd/install/shared/flags.go +++ b/mgradm/cmd/install/shared/flags.go @@ -5,114 +5,13 @@ package shared import ( - "fmt" - "net/mail" - "regexp" - "strings" - "github.com/spf13/cobra" cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - apiTypes "github.com/uyuni-project/uyuni-tools/shared/api/types" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/ssl" - "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) -// DBFlags can store all values required to connect to a database. -type DBFlags struct { - Host string - Name string - Port int - User string - Password string - Protocol string - Provider string - Admin struct { - User string - Password string - } -} - -// DebugFlags contains information about enabled/disabled debug. -type DebugFlags struct { - Java bool -} - -// InstallFlags stores all the flags used by install command. -type InstallFlags struct { - TZ string - Email string - EmailFrom string - IssParent string - Mirror string - Tftp bool - DB DBFlags - ReportDB DBFlags - SSL cmd_utils.InstallSSLFlags - SCC types.SCCCredentials - Debug DebugFlags - Image types.ImageFlags `mapstructure:",squash"` - Coco cmd_utils.CocoFlags - HubXmlrpc cmd_utils.HubXmlrpcFlags - Admin apiTypes.User - Organization string -} - -// idChecker verifies that the value is a valid identifier. -func idChecker(value string) bool { - r := regexp.MustCompile(`^([[:alnum:]]|[._-])+$`) - if r.MatchString(value) { - return true - } - fmt.Println(L("Can only contain letters, digits . _ and -")) - return false -} - -// emailChecker verifies that the value is a valid email address. -func emailChecker(value string) bool { - address, err := mail.ParseAddress(value) - if err != nil || address.Name != "" || strings.ContainsAny(value, "<>") { - fmt.Println(L("Not a valid email address")) - return false - } - return true -} - -// CheckParameters checks parameters for install command. 
-func (flags *InstallFlags) CheckParameters(cmd *cobra.Command, command string) { - if flags.DB.Password == "" { - flags.DB.Password = utils.GetRandomBase64(30) - } - - if flags.ReportDB.Password == "" { - flags.ReportDB.Password = utils.GetRandomBase64(30) - } - - // Make sure we have all the required 3rd party flags or none - flags.SSL.CheckParameters() - - // Since we use cert-manager for self-signed certificates on kubernetes we don't need password for it - if !flags.SSL.UseExisting() && command == "podman" { - utils.AskPasswordIfMissing(&flags.SSL.Password, cmd.Flag("ssl-password").Usage, 0, 0) - } - - // Use the host timezone if the user didn't define one - if flags.TZ == "" { - flags.TZ = utils.GetLocalTimezone() - } - - utils.AskIfMissing(&flags.Email, cmd.Flag("email").Usage, 1, 128, emailChecker) - utils.AskIfMissing(&flags.EmailFrom, cmd.Flag("emailfrom").Usage, 0, 0, emailChecker) - - utils.AskIfMissing(&flags.Admin.Login, cmd.Flag("admin-login").Usage, 1, 64, idChecker) - utils.AskPasswordIfMissing(&flags.Admin.Password, cmd.Flag("admin-password").Usage, 5, 48) - utils.AskIfMissing(&flags.Organization, cmd.Flag("organization").Usage, 3, 128, nil) - - flags.SSL.Email = flags.Email - flags.Admin.Email = flags.Email -} - // AddInspectFlags add flags to inspect command. func AddInspectFlags(cmd *cobra.Command) { cmd_utils.AddSCCFlag(cmd) diff --git a/mgradm/cmd/install/shared/shared.go b/mgradm/cmd/install/shared/shared.go index 9a7659d1a..261619b54 100644 --- a/mgradm/cmd/install/shared/shared.go +++ b/mgradm/cmd/install/shared/shared.go @@ -24,7 +24,7 @@ import ( const setupName = "setup.sh" // RunSetup execute the setup. -func RunSetup(cnx *shared.Connection, flags *InstallFlags, fqdn string, env map[string]string) error { +func RunSetup(cnx *shared.Connection, flags *adm_utils.ServerFlags, fqdn string, env map[string]string) error { // Containers should be running now, check storage if it is using volume from already configured server preconfigured := false if isServerConfigured(cnx) { @@ -34,7 +34,7 @@ func RunSetup(cnx *shared.Connection, flags *InstallFlags, fqdn string, env map[ preconfigured = true } - tmpFolder, cleaner, err := generateSetupScript(flags, fqdn, env) + tmpFolder, cleaner, err := generateSetupScript(&flags.Installation, fqdn, flags.Mirror, env) if err != nil { return err } @@ -52,14 +52,16 @@ func RunSetup(cnx *shared.Connection, flags *InstallFlags, fqdn string, env map[ return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates")) } + installFlags := &flags.Installation + // Call the org.createFirst api if flags are passed // This should not happen since the password is queried and enforced - if flags.Admin.Password != "" { + if installFlags.Admin.Password != "" { apiCnx := api.ConnectionDetails{ Server: fqdn, Insecure: false, - User: flags.Admin.Login, - Password: flags.Admin.Password, + User: installFlags.Admin.Login, + Password: installFlags.Admin.Password, } // Check if there is already admin user with given password and organization with same name @@ -68,11 +70,14 @@ func RunSetup(cnx *shared.Connection, flags *InstallFlags, fqdn string, env map[ log.Error().Err(err).Msgf(L("unable to prepare API client")) } if err = client.Login(); err == nil { - if _, err := org.GetOrganizationDetails(&apiCnx, flags.Organization); err == nil { + if _, err := org.GetOrganizationDetails(&apiCnx, installFlags.Organization); err == nil { log.Info().Msgf(L("Server organization already exists, reusing")) } else { 
log.Debug().Err(err).Msg("Error returned by server") - log.Warn().Msgf(L("Administration user already exists, but organization %s could not be found"), flags.Organization) + log.Warn().Msgf( + L("Administration user already exists, but organization %s could not be found"), + installFlags.Organization, + ) } } else { var connError *url.Error @@ -81,7 +86,7 @@ func RunSetup(cnx *shared.Connection, flags *InstallFlags, fqdn string, env map[ return err } // We do not have any user existing, create one. CreateFirst skip user login - _, err := org.CreateFirst(&apiCnx, flags.Organization, &flags.Admin) + _, err := org.CreateFirst(&apiCnx, installFlags.Organization, &installFlags.Admin) if err != nil { if preconfigured { log.Warn().Msgf(L("Administration user already exists, but provided credentials are not valid")) @@ -92,14 +97,19 @@ func RunSetup(cnx *shared.Connection, flags *InstallFlags, fqdn string, env map[ } } - log.Info().Msgf(L("Server set up, login on https://%[1]s with %[2]s user"), fqdn, flags.Admin.Login) + log.Info().Msgf(L("Server set up, login on https://%[1]s with %[2]s user"), fqdn, installFlags.Admin.Login) return nil } // generateSetupScript creates a temporary folder with the setup script to execute in the container. // The script exports all the needed environment variables and calls uyuni's mgr-setup. // Podman or kubernetes-specific variables can be passed using extraEnv parameter. -func generateSetupScript(flags *InstallFlags, fqdn string, extraEnv map[string]string) (string, func(), error) { +func generateSetupScript( + flags *adm_utils.InstallationFlags, + fqdn string, + mirror string, + extraEnv map[string]string, +) (string, func(), error) { localHostValues := []string{ "localhost", "127.0.0.1", @@ -143,7 +153,7 @@ func generateSetupScript(flags *InstallFlags, fqdn string, extraEnv map[string]s "SCC_USER": flags.SCC.User, "SCC_PASS": flags.SCC.Password, } - if flags.Mirror != "" { + if mirror != "" { env["MIRROR_PATH"] = "/mirror" } diff --git a/mgradm/cmd/migrate/kubernetes/kubernetes.go b/mgradm/cmd/migrate/kubernetes/kubernetes.go index 7ed5635fe..48079bf13 100644 --- a/mgradm/cmd/migrate/kubernetes/kubernetes.go +++ b/mgradm/cmd/migrate/kubernetes/kubernetes.go @@ -10,20 +10,14 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/shared" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) -type kubernetesMigrateFlags struct { - shared.MigrateFlags `mapstructure:",squash"` - Helm cmd_utils.HelmFlags - SCC types.SCCCredentials - SSL types.SSLCertGenerationFlags -} - -func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetesMigrateFlags]) *cobra.Command { +func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command { migrateCmd := &cobra.Command{ Use: "kubernetes [source server FQDN]", Short: L("Migrate a remote server to containers running on a kubernetes cluster"), @@ -44,10 +38,10 @@ NOTE: migrating to a remote cluster is not supported yet! 
`), Args: cobra.ExactArgs(1), RunE: func(cmd *cobra.Command, args []string) error { - var flags kubernetesMigrateFlags + var flags kubernetes.KubernetesServerFlags flagsUpdater := func(v *viper.Viper) { - flags.MigrateFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.MigrateFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/migrate/kubernetes/kubernetes_test.go b/mgradm/cmd/migrate/kubernetes/kubernetes_test.go index 02b9412f9..1c846d63b 100644 --- a/mgradm/cmd/migrate/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/migrate/kubernetes/kubernetes_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/spf13/cobra" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" "github.com/uyuni-project/uyuni-tools/shared/testutils" "github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -32,19 +33,19 @@ func TestParamsParsing(t *testing.T) { args = append(args, flagstests.ServerHelmFlagsTestArgs...) // Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *kubernetesMigrateFlags, + tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, _ *cobra.Command, args []string, ) error { - testutils.AssertTrue(t, "Prepare not set", flags.Prepare) + testutils.AssertTrue(t, "Prepare not set", flags.Migration.Prepare) flagstests.AssertMirrorFlag(t, flags.Mirror) - flagstests.AssertSCCFlag(t, &flags.SCC) + flagstests.AssertSCCFlag(t, &flags.Installation.SCC) flagstests.AssertImageFlag(t, &flags.Image) flagstests.AssertDBUpgradeImageFlag(t, &flags.DBUpgradeImage) flagstests.AssertCocoFlag(t, &flags.Coco) flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) - testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.User) + testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.Migration.User) flagstests.AssertServerHelmFlags(t, &flags.Helm) - testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.SSL.Password) + testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.Installation.SSL.Password) testutils.AssertEquals(t, "Wrong FQDN", "source.fq.dn", args[0]) return nil } diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go index d0683002f..4c073d047 100644 --- a/mgradm/cmd/migrate/kubernetes/utils.go +++ b/mgradm/cmd/migrate/kubernetes/utils.go @@ -28,7 +28,7 @@ import ( func migrateToKubernetes( _ *types.GlobalFlags, - flags *kubernetesMigrateFlags, + flags *kubernetes.KubernetesServerFlags, _ *cobra.Command, args []string, ) error { @@ -64,7 +64,7 @@ func migrateToKubernetes( sshConfigPath, sshKnownhostsPath := migration_shared.GetSSHPaths() // Prepare the migration script and folder - scriptDir, cleaner, err := adm_utils.GenerateMigrationScript(fqdn, flags.User, true, flags.Prepare) + scriptDir, cleaner, err := adm_utils.GenerateMigrationScript(fqdn, flags.Migration.User, true, flags.Migration.Prepare) if err != nil { return utils.Errorf(err, L("failed to generate migration script")) } @@ -82,7 +82,7 @@ func migrateToKubernetes( helmArgs := []string{} // Create a secret using SCC credentials if any are provided - helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Uyuni.Namespace, &flags.SCC) + 
helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Uyuni.Namespace, &flags.Installation.SCC) if err != nil { return err } @@ -95,8 +95,9 @@ func migrateToKubernetes( "--set", "migration.dataPath="+scriptDir, ) - if err := kubernetes.Deploy(cnx, flags.Image.Registry, &flags.Image, &flags.HubXmlrpc, - &flags.Helm, clusterInfos, fqdn, false, flags.Prepare, migrationArgs..., + if err := kubernetes.Deploy( + cnx, flags.Image.Registry, &flags.Image, &flags.HubXmlrpc, + &flags.Helm, clusterInfos, fqdn, false, flags.Migration.Prepare, migrationArgs..., ); err != nil { return utils.Errorf(err, L("cannot run deploy")) } @@ -123,7 +124,7 @@ func migrateToKubernetes( return utils.Errorf(err, L("cannot set replicas to 0")) } - if flags.Prepare { + if flags.Migration.Prepare { log.Info().Msg(L("Migration prepared. Run the 'migrate' command without '--prepare' to finish the migration.")) return nil } @@ -135,7 +136,9 @@ func migrateToKubernetes( } }() - setupSSLArray, err := setupSSL(&flags.Helm, kubeconfig, scriptDir, flags.SSL.Password, flags.Image.PullPolicy) + setupSSLArray, err := setupSSL( + &flags.Helm, kubeconfig, scriptDir, flags.Installation.SSL.Password, flags.Image.PullPolicy, + ) if err != nil { return utils.Errorf(err, L("cannot setup SSL")) } diff --git a/mgradm/cmd/migrate/podman/podman.go b/mgradm/cmd/migrate/podman/podman.go index 73b4aeb27..90aa131b4 100644 --- a/mgradm/cmd/migrate/podman/podman.go +++ b/mgradm/cmd/migrate/podman/podman.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/shared" + cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" podman_utils "github.com/uyuni-project/uyuni-tools/shared/podman" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -15,9 +16,8 @@ import ( ) type podmanMigrateFlags struct { - shared.MigrateFlags `mapstructure:",squash"` - SCC types.SCCCredentials - Podman podman_utils.PodmanFlags + cmd_utils.ServerFlags `mapstructure:",squash"` + Podman podman_utils.PodmanFlags } func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanMigrateFlags]) *cobra.Command { @@ -38,8 +38,8 @@ NOTE: migrating to a remote podman is not supported yet! RunE: func(cmd *cobra.Command, args []string) error { var flags podmanMigrateFlags flagsUpdater := func(v *viper.Viper) { - flags.MigrateFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.MigrateFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/migrate/podman/podman_test.go b/mgradm/cmd/migrate/podman/podman_test.go index 61c2f62a5..a6b54a8e7 100644 --- a/mgradm/cmd/migrate/podman/podman_test.go +++ b/mgradm/cmd/migrate/podman/podman_test.go @@ -29,15 +29,17 @@ func TestParamsParsing(t *testing.T) { args = append(args, flagstests.PodmanFlagsTestArgs...) 
// Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *podmanMigrateFlags, _ *cobra.Command, args []string) error { - testutils.AssertTrue(t, "Prepare not set", flags.Prepare) + tester := func(_ *types.GlobalFlags, flags *podmanMigrateFlags, + _ *cobra.Command, args []string, + ) error { + testutils.AssertTrue(t, "Prepare not set", flags.Migration.Prepare) flagstests.AssertMirrorFlag(t, flags.Mirror) - flagstests.AssertSCCFlag(t, &flags.SCC) + flagstests.AssertSCCFlag(t, &flags.Installation.SCC) flagstests.AssertImageFlag(t, &flags.Image) flagstests.AssertDBUpgradeImageFlag(t, &flags.DBUpgradeImage) flagstests.AssertCocoFlag(t, &flags.Coco) flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) - testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.User) + testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.Migration.User) flagstests.AssertPodmanInstallFlags(t, &flags.Podman) testutils.AssertEquals(t, "Wrong FQDN", "source.fq.dn", args[0]) return nil diff --git a/mgradm/cmd/migrate/podman/utils.go b/mgradm/cmd/migrate/podman/utils.go index a25ed963e..93865eb89 100644 --- a/mgradm/cmd/migrate/podman/utils.go +++ b/mgradm/cmd/migrate/podman/utils.go @@ -49,7 +49,7 @@ func migrateToPodman( return err } - authFile, cleaner, err := podman_utils.PodmanLogin(hostData, flags.SCC) + authFile, cleaner, err := podman_utils.PodmanLogin(hostData, flags.Installation.SCC) if err != nil { return utils.Errorf(err, L("failed to login to registry.suse.com")) } @@ -71,12 +71,13 @@ func migrateToPodman( sshConfigPath, sshKnownhostsPath := migration_shared.GetSSHPaths() extractedData, err := podman.RunMigration( - preparedImage, sshAuthSocket, sshConfigPath, sshKnownhostsPath, sourceFqdn, flags.User, flags.Prepare, + preparedImage, sshAuthSocket, sshConfigPath, sshKnownhostsPath, sourceFqdn, + flags.Migration.User, flags.Migration.Prepare, ) if err != nil { return utils.Errorf(err, L("cannot run migration script")) } - if flags.Prepare { + if flags.Migration.Prepare { log.Info().Msg(L("Migration prepared. Run the 'migrate' command without '--prepare' to finish the migration.")) return nil } diff --git a/mgradm/cmd/migrate/shared/flags.go b/mgradm/cmd/migrate/shared/flags.go index 75b5e402d..7d1d0b561 100644 --- a/mgradm/cmd/migrate/shared/flags.go +++ b/mgradm/cmd/migrate/shared/flags.go @@ -8,21 +8,8 @@ import ( "github.com/spf13/cobra" "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/types" ) -// MigrateFlags represents flag required by migration command. -type MigrateFlags struct { - Prepare bool - Image types.ImageFlags `mapstructure:",squash"` - DBUpgradeImage types.ImageFlags `mapstructure:"dbupgrade"` - Coco utils.CocoFlags - User string - Mirror string - HubXmlrpc utils.HubXmlrpcFlags - SCC types.SCCCredentials -} - // AddMigrateFlags add migration flags to a command. 
func AddMigrateFlags(cmd *cobra.Command) { cmd.Flags().Bool("prepare", false, L("Prepare the mgration - copy the data without stopping the source server.")) diff --git a/mgradm/cmd/upgrade/kubernetes/kubernetes.go b/mgradm/cmd/upgrade/kubernetes/kubernetes.go index 3b0aac6d6..bf4916b47 100644 --- a/mgradm/cmd/upgrade/kubernetes/kubernetes.go +++ b/mgradm/cmd/upgrade/kubernetes/kubernetes.go @@ -10,28 +10,24 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/upgrade/shared" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) -type kubernetesUpgradeFlags struct { - shared.UpgradeFlags `mapstructure:",squash"` - Helm cmd_utils.HelmFlags -} - -func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetesUpgradeFlags]) *cobra.Command { +func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command { upgradeCmd := &cobra.Command{ Use: "kubernetes", Short: L("Upgrade a local server on kubernetes"), Long: L("Upgrade a local server on kubernetes"), Args: cobra.ExactArgs(0), RunE: func(cmd *cobra.Command, args []string) error { - var flags kubernetesUpgradeFlags + var flags kubernetes.KubernetesServerFlags flagsUpdater := func(v *viper.Viper) { - flags.UpgradeFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.UpgradeFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go b/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go index 5fefb4602..e747c8861 100644 --- a/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/spf13/cobra" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" "github.com/uyuni-project/uyuni-tools/shared/testutils" "github.com/uyuni-project/uyuni-tools/shared/testutils/flagstests" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -26,7 +27,7 @@ func TestParamsParsing(t *testing.T) { args = append(args, flagstests.ServerHelmFlagsTestArgs...) 
// Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *kubernetesUpgradeFlags, + tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, _ *cobra.Command, _ []string, ) error { flagstests.AssertImageFlag(t, &flags.Image) diff --git a/mgradm/cmd/upgrade/kubernetes/utils.go b/mgradm/cmd/upgrade/kubernetes/utils.go index 161abd297..81818628b 100644 --- a/mgradm/cmd/upgrade/kubernetes/utils.go +++ b/mgradm/cmd/upgrade/kubernetes/utils.go @@ -14,11 +14,11 @@ import ( func upgradeKubernetes( globalFlags *types.GlobalFlags, - flags *kubernetesUpgradeFlags, + flags *kubernetes.KubernetesServerFlags, cmd *cobra.Command, args []string, ) error { return kubernetes.Upgrade( - globalFlags, &flags.UpgradeFlags.Image, &flags.DBUpgradeImage, &flags.HubXmlrpc.Image, flags.Helm, cmd, args, + globalFlags, &flags.ServerFlags.Image, &flags.DBUpgradeImage, &flags.HubXmlrpc.Image, flags.Helm, cmd, args, ) } diff --git a/mgradm/cmd/upgrade/podman/podman.go b/mgradm/cmd/upgrade/podman/podman.go index 72139d807..8cb5652c6 100644 --- a/mgradm/cmd/upgrade/podman/podman.go +++ b/mgradm/cmd/upgrade/podman/podman.go @@ -9,6 +9,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/uyuni-project/uyuni-tools/mgradm/cmd/upgrade/shared" + cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" . "github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/podman" "github.com/uyuni-project/uyuni-tools/shared/types" @@ -16,9 +17,8 @@ import ( ) type podmanUpgradeFlags struct { - shared.UpgradeFlags `mapstructure:",squash"` - SCC types.SCCCredentials - Podman podman.PodmanFlags + cmd_utils.ServerFlags `mapstructure:",squash"` + Podman podman.PodmanFlags } func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanUpgradeFlags]) *cobra.Command { @@ -29,8 +29,8 @@ func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[podmanUpgradeF RunE: func(cmd *cobra.Command, args []string) error { var flags podmanUpgradeFlags flagsUpdater := func(v *viper.Viper) { - flags.UpgradeFlags.Coco.IsChanged = v.IsSet("coco.replicas") - flags.UpgradeFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") + flags.ServerFlags.Coco.IsChanged = v.IsSet("coco.replicas") + flags.ServerFlags.HubXmlrpc.IsChanged = v.IsSet("hubxmlrpc.replicas") } return utils.CommandHelper(globalFlags, cmd, args, &flags, flagsUpdater, run) }, diff --git a/mgradm/cmd/upgrade/podman/podman_test.go b/mgradm/cmd/upgrade/podman/podman_test.go index 42251f804..92cdf0c28 100644 --- a/mgradm/cmd/upgrade/podman/podman_test.go +++ b/mgradm/cmd/upgrade/podman/podman_test.go @@ -24,12 +24,14 @@ func TestParamsParsing(t *testing.T) { args = append(args, flagstests.PodmanFlagsTestArgs...) 
// Test function asserting that the args are properly parsed - tester := func(_ *types.GlobalFlags, flags *podmanUpgradeFlags, _ *cobra.Command, _ []string) error { + tester := func(_ *types.GlobalFlags, flags *podmanUpgradeFlags, + _ *cobra.Command, _ []string, + ) error { flagstests.AssertImageFlag(t, &flags.Image) flagstests.AssertDBUpgradeImageFlag(t, &flags.DBUpgradeImage) flagstests.AssertCocoFlag(t, &flags.Coco) flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) - flagstests.AssertSCCFlag(t, &flags.SCC) + flagstests.AssertSCCFlag(t, &flags.ServerFlags.Installation.SCC) flagstests.AssertPodmanInstallFlags(t, &flags.Podman) return nil } diff --git a/mgradm/cmd/upgrade/podman/utils.go b/mgradm/cmd/upgrade/podman/utils.go index 9fc192ff8..740f8b2bd 100644 --- a/mgradm/cmd/upgrade/podman/utils.go +++ b/mgradm/cmd/upgrade/podman/utils.go @@ -21,7 +21,7 @@ func upgradePodman(_ *types.GlobalFlags, flags *podmanUpgradeFlags, _ *cobra.Com return err } - authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.SCC) + authFile, cleaner, err := shared_podman.PodmanLogin(hostData, flags.Installation.SCC) if err != nil { return utils.Errorf(err, L("failed to login to registry.suse.com")) } diff --git a/mgradm/cmd/upgrade/shared/flags.go b/mgradm/cmd/upgrade/shared/flags.go index d11046cde..1d165e2fd 100644 --- a/mgradm/cmd/upgrade/shared/flags.go +++ b/mgradm/cmd/upgrade/shared/flags.go @@ -7,17 +7,8 @@ package shared import ( "github.com/spf13/cobra" "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - "github.com/uyuni-project/uyuni-tools/shared/types" ) -// UpgradeFlags represents flags used for upgrading a server. -type UpgradeFlags struct { - Image types.ImageFlags `mapstructure:",squash"` - DBUpgradeImage types.ImageFlags `mapstructure:"dbupgrade"` - Coco utils.CocoFlags - HubXmlrpc utils.HubXmlrpcFlags -} - // AddUpgradeFlags add upgrade flags to a command. func AddUpgradeFlags(cmd *cobra.Command) { utils.AddImageFlag(cmd) diff --git a/mgradm/shared/kubernetes/deployment_test.go b/mgradm/shared/kubernetes/deployment_test.go index 7df05cabd..e4f5578e7 100644 --- a/mgradm/shared/kubernetes/deployment_test.go +++ b/mgradm/shared/kubernetes/deployment_test.go @@ -27,7 +27,7 @@ func TestGetRunningServerImage(t *testing.T) { } for i, test := range data { - runCmdOutput = func(logLevel zerolog.Level, command string, args ...string) ([]byte, error) { + runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) { return []byte(test.out), test.err } actual := getRunningServerImage("myns") diff --git a/mgradm/shared/kubernetes/flags.go b/mgradm/shared/kubernetes/flags.go new file mode 100644 index 000000000..427692ee2 --- /dev/null +++ b/mgradm/shared/kubernetes/flags.go @@ -0,0 +1,17 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" + +// KubernetesServerFlags is the aggregation of all flags for install, upgrade and migrate. +type KubernetesServerFlags struct { + utils.ServerFlags `mapstructure:",squash"` + Helm utils.HelmFlags + // SSH defines the SSH configuration to use to connect to the source server to migrate. 
+	SSH utils.SSHFlags
+}
diff --git a/mgradm/shared/utils/flags.go b/mgradm/shared/utils/flags.go
new file mode 100644
index 000000000..dee0770a0
--- /dev/null
+++ b/mgradm/shared/utils/flags.go
@@ -0,0 +1,138 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package utils
+
+import (
+	"fmt"
+	"net/mail"
+	"regexp"
+	"strings"
+
+	"github.com/spf13/cobra"
+	apiTypes "github.com/uyuni-project/uyuni-tools/shared/api/types"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+)
+
+// ServerFlags is a structure hosting the parameters for installation, migration and upgrade.
+type ServerFlags struct {
+	Image        types.ImageFlags `mapstructure:",squash"`
+	Coco         CocoFlags
+	Mirror       string
+	HubXmlrpc    HubXmlrpcFlags
+	Migration    MigrationFlags    `mapstructure:",squash"`
+	Installation InstallationFlags `mapstructure:",squash"`
+	// DBUpgradeImage is the image to use to perform the database upgrade.
+	DBUpgradeImage types.ImageFlags `mapstructure:"dbupgrade"`
+}
+
+// MigrationFlags contains the parameters that are used only for migration.
+type MigrationFlags struct {
+	// Prepare defines whether to run the full migration or just the data synchronization.
+	Prepare bool
+	// User is the username to use to connect to the source server in a migration.
+	User string
+}
+
+// InstallationFlags contains the parameters that are used only for the installation of a new server.
+type InstallationFlags struct {
+	TZ           string
+	Email        string
+	EmailFrom    string
+	IssParent    string
+	Tftp         bool
+	DB           DBFlags
+	ReportDB     DBFlags
+	SSL          InstallSSLFlags
+	SCC          types.SCCCredentials
+	Debug        DebugFlags
+	Admin        apiTypes.User
+	Organization string
+}
+
+// CheckParameters checks parameters for install command.
+func (flags *InstallationFlags) CheckParameters(cmd *cobra.Command, command string) {
+	if flags.DB.Password == "" {
+		flags.DB.Password = utils.GetRandomBase64(30)
+	}
+
+	if flags.ReportDB.Password == "" {
+		flags.ReportDB.Password = utils.GetRandomBase64(30)
+	}
+
+	// Make sure we have all the required 3rd party flags or none
+	flags.SSL.CheckParameters()
+
+	// Since we use cert-manager for self-signed certificates on kubernetes we don't need password for it
+	if !flags.SSL.UseExisting() && command == "podman" {
+		utils.AskPasswordIfMissing(&flags.SSL.Password, cmd.Flag("ssl-password").Usage, 0, 0)
+	}
+
+	// Use the host timezone if the user didn't define one
+	if flags.TZ == "" {
+		flags.TZ = utils.GetLocalTimezone()
+	}
+
+	utils.AskIfMissing(&flags.Email, cmd.Flag("email").Usage, 1, 128, emailChecker)
+	utils.AskIfMissing(&flags.EmailFrom, cmd.Flag("emailfrom").Usage, 0, 0, emailChecker)
+
+	utils.AskIfMissing(&flags.Admin.Login, cmd.Flag("admin-login").Usage, 1, 64, idChecker)
+	utils.AskPasswordIfMissing(&flags.Admin.Password, cmd.Flag("admin-password").Usage, 5, 48)
+	utils.AskIfMissing(&flags.Organization, cmd.Flag("organization").Usage, 3, 128, nil)
+
+	flags.SSL.Email = flags.Email
+	flags.Admin.Email = flags.Email
+}
+
+// DBFlags can store all values required to connect to a database.
+type DBFlags struct {
+	Host     string
+	Name     string
+	Port     int
+	User     string
+	Password string
+	Protocol string
+	Provider string
+	Admin    struct {
+		User     string
+		Password string
+	}
+}
+
+// DebugFlags contains information about enabled/disabled debug.
+type DebugFlags struct {
+	Java bool
+}
+
+// idChecker verifies that the value is a valid identifier.
+func idChecker(value string) bool { + r := regexp.MustCompile(`^([[:alnum:]]|[._-])+$`) + if r.MatchString(value) { + return true + } + fmt.Println(L("Can only contain letters, digits . _ and -")) + return false +} + +// emailChecker verifies that the value is a valid email address. +func emailChecker(value string) bool { + address, err := mail.ParseAddress(value) + if err != nil || address.Name != "" || strings.ContainsAny(value, "<>") { + fmt.Println(L("Not a valid email address")) + return false + } + return true +} + +// SSHFlags is the structure holding the SSH configuration to use to connect to the source server to migrate. +type SSHFlags struct { + Key struct { + Public string + Private string + } + Knownhosts string + Config string +} diff --git a/mgradm/cmd/install/shared/flags_test.go b/mgradm/shared/utils/flags_test.go similarity index 98% rename from mgradm/cmd/install/shared/flags_test.go rename to mgradm/shared/utils/flags_test.go index 2ee7d6d30..9f43f5c9b 100644 --- a/mgradm/cmd/install/shared/flags_test.go +++ b/mgradm/shared/utils/flags_test.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package shared +package utils import "testing" diff --git a/shared/testutils/flagstests/mgradm_install.go b/shared/testutils/flagstests/mgradm_install.go index 7cd401223..edfb76f47 100644 --- a/shared/testutils/flagstests/mgradm_install.go +++ b/shared/testutils/flagstests/mgradm_install.go @@ -7,7 +7,7 @@ package flagstests import ( "testing" - "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared" + "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" "github.com/uyuni-project/uyuni-tools/shared/testutils" ) @@ -58,42 +58,47 @@ var InstallFlagsTestArgs = func() []string { } // AssertInstallFlags checks that all the install flags are parsed correctly. 
-func AssertInstallFlags(t *testing.T, flags *shared.InstallFlags) { - testutils.AssertEquals(t, "Error parsing --tz", "CEST", flags.TZ) - testutils.AssertEquals(t, "Error parsing --email", "admin@foo.bar", flags.Email) - testutils.AssertEquals(t, "Error parsing --emailfrom", "sender@foo.bar", flags.EmailFrom) - testutils.AssertEquals(t, "Error parsing --issParent", "parent.iss.com", flags.IssParent) - testutils.AssertEquals(t, "Error parsing --db-user", "dbuser", flags.DB.User) - testutils.AssertEquals(t, "Error parsing --db-password", "dbpass", flags.DB.Password) - testutils.AssertEquals(t, "Error parsing --db-name", "dbname", flags.DB.Name) - testutils.AssertEquals(t, "Error parsing --db-host", "dbhost", flags.DB.Host) - testutils.AssertEquals(t, "Error parsing --db-port", 1234, flags.DB.Port) - testutils.AssertEquals(t, "Error parsing --db-protocol", "dbprot", flags.DB.Protocol) - testutils.AssertEquals(t, "Error parsing --db-admin-user", "dbadmin", flags.DB.Admin.User) - testutils.AssertEquals(t, "Error parsing --db-admin-password", "dbadminpass", flags.DB.Admin.Password) - testutils.AssertEquals(t, "Error parsing --db-provider", "aws", flags.DB.Provider) - testutils.AssertEquals(t, "Error parsing --tftp", false, flags.Tftp) - testutils.AssertEquals(t, "Error parsing --reportdb-user", "reportdbuser", flags.ReportDB.User) - testutils.AssertEquals(t, "Error parsing --reportdb-password", "reportdbpass", flags.ReportDB.Password) - testutils.AssertEquals(t, "Error parsing --reportdb-name", "reportdbname", flags.ReportDB.Name) - testutils.AssertEquals(t, "Error parsing --reportdb-host", "reportdbhost", flags.ReportDB.Host) - testutils.AssertEquals(t, "Error parsing --reportdb-port", 5678, flags.ReportDB.Port) - AssertSSLGenerationFlags(t, &flags.SSL.SSLCertGenerationFlags) - testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.SSL.Password) +func AssertInstallFlags(t *testing.T, flags *utils.ServerFlags) { + testutils.AssertEquals(t, "Error parsing --tz", "CEST", flags.Installation.TZ) + testutils.AssertEquals(t, "Error parsing --email", "admin@foo.bar", flags.Installation.Email) + testutils.AssertEquals(t, "Error parsing --emailfrom", "sender@foo.bar", flags.Installation.EmailFrom) + testutils.AssertEquals(t, "Error parsing --issParent", "parent.iss.com", flags.Installation.IssParent) + testutils.AssertEquals(t, "Error parsing --db-user", "dbuser", flags.Installation.DB.User) + testutils.AssertEquals(t, "Error parsing --db-password", "dbpass", flags.Installation.DB.Password) + testutils.AssertEquals(t, "Error parsing --db-name", "dbname", flags.Installation.DB.Name) + testutils.AssertEquals(t, "Error parsing --db-host", "dbhost", flags.Installation.DB.Host) + testutils.AssertEquals(t, "Error parsing --db-port", 1234, flags.Installation.DB.Port) + testutils.AssertEquals(t, "Error parsing --db-protocol", "dbprot", flags.Installation.DB.Protocol) + testutils.AssertEquals(t, "Error parsing --db-admin-user", "dbadmin", flags.Installation.DB.Admin.User) + testutils.AssertEquals(t, "Error parsing --db-admin-password", "dbadminpass", flags.Installation.DB.Admin.Password) + testutils.AssertEquals(t, "Error parsing --db-provider", "aws", flags.Installation.DB.Provider) + testutils.AssertEquals(t, "Error parsing --tftp", false, flags.Installation.Tftp) + testutils.AssertEquals(t, "Error parsing --reportdb-user", "reportdbuser", flags.Installation.ReportDB.User) + testutils.AssertEquals(t, "Error parsing --reportdb-password", "reportdbpass", 
flags.Installation.ReportDB.Password) + testutils.AssertEquals(t, "Error parsing --reportdb-name", "reportdbname", flags.Installation.ReportDB.Name) + testutils.AssertEquals(t, "Error parsing --reportdb-host", "reportdbhost", flags.Installation.ReportDB.Host) + testutils.AssertEquals(t, "Error parsing --reportdb-port", 5678, flags.Installation.ReportDB.Port) + testutils.AssertEquals(t, "Error parsing --ssl-cname", []string{"cname1", "cname2"}, flags.Installation.SSL.Cnames) + testutils.AssertEquals(t, "Error parsing --ssl-country", "OS", flags.Installation.SSL.Country) + testutils.AssertEquals(t, "Error parsing --ssl-state", "sslstate", flags.Installation.SSL.State) + testutils.AssertEquals(t, "Error parsing --ssl-city", "sslcity", flags.Installation.SSL.City) + testutils.AssertEquals(t, "Error parsing --ssl-org", "sslorg", flags.Installation.SSL.Org) + testutils.AssertEquals(t, "Error parsing --ssl-ou", "sslou", flags.Installation.SSL.OU) + testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.Installation.SSL.Password) testutils.AssertEquals(t, "Error parsing --ssl-ca-intermediate", - []string{"path/inter1.crt", "path/inter2.crt"}, flags.SSL.Ca.Intermediate, + []string{"path/inter1.crt", "path/inter2.crt"}, flags.Installation.SSL.Ca.Intermediate, ) - testutils.AssertEquals(t, "Error parsing --ssl-ca-root", "path/root.crt", flags.SSL.Ca.Root) - testutils.AssertEquals(t, "Error parsing --ssl-server-cert", "path/srv.crt", flags.SSL.Server.Cert) - testutils.AssertEquals(t, "Error parsing --ssl-server-key", "path/srv.key", flags.SSL.Server.Key) - testutils.AssertTrue(t, "Error parsing --debug-java", flags.Debug.Java) - testutils.AssertEquals(t, "Error parsing --admin-login", "adminuser", flags.Admin.Login) - testutils.AssertEquals(t, "Error parsing --admin-password", "adminpass", flags.Admin.Password) - testutils.AssertEquals(t, "Error parsing --admin-firstName", "adminfirst", flags.Admin.FirstName) - testutils.AssertEquals(t, "Error parsing --admin-lastName", "adminlast", flags.Admin.LastName) - testutils.AssertEquals(t, "Error parsing --organization", "someorg", flags.Organization) + testutils.AssertEquals(t, "Error parsing --ssl-ca-root", "path/root.crt", flags.Installation.SSL.Ca.Root) + testutils.AssertEquals(t, "Error parsing --ssl-server-cert", "path/srv.crt", flags.Installation.SSL.Server.Cert) + testutils.AssertEquals(t, "Error parsing --ssl-server-key", "path/srv.key", flags.Installation.SSL.Server.Key) + testutils.AssertTrue(t, "Error parsing --debug-java", flags.Installation.Debug.Java) + testutils.AssertEquals(t, "Error parsing --admin-login", "adminuser", flags.Installation.Admin.Login) + testutils.AssertEquals(t, "Error parsing --admin-password", "adminpass", flags.Installation.Admin.Password) + testutils.AssertEquals(t, "Error parsing --admin-firstName", "adminfirst", flags.Installation.Admin.FirstName) + testutils.AssertEquals(t, "Error parsing --admin-lastName", "adminlast", flags.Installation.Admin.LastName) + testutils.AssertEquals(t, "Error parsing --organization", "someorg", flags.Installation.Organization) AssertMirrorFlag(t, flags.Mirror) - AssertSCCFlag(t, &flags.SCC) + AssertSCCFlag(t, &flags.Installation.SCC) AssertImageFlag(t, &flags.Image) AssertCocoFlag(t, &flags.Coco) AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) From e30054726823475ad58fbff9cb32bbb09db80672 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Thu, 8 Aug 2024 12:21:25 +0200 Subject: [PATCH 10/19] Kubernetes migration refactoring Migration to kubernetes is 
rather fragile, with:

1. tasks running in `kubectl exec` or as a `pod`,
2. the uyuni helm chart being deployed multiple times,
3. `hostPath` mounts used everywhere for the scripts to run and the data to
   read, forcing everything to run on the cluster node.

Here are the solutions to those problems:

1. Each step now runs as a Job, and those are not deleted automatically so
   that the user can access their logs afterwards.

2. Stop using the helm chart and deploy the resources only when we need them.
   This allows more control over what runs when, and reduces the number of
   useless starts of the giant server container. The PostgreSQL DB upgrade
   temporarily disables SSL in postgresql.conf so that it does not rely on the
   SSL certificates being migrated yet.

3. The script to run for each step is passed directly as an `sh -c` parameter
   of the generated Job. The migration data is stored in a dedicated volume,
   not on the host.

As a consequence, an SSH agent can no longer be used, as that would again
require running on a cluster node. The tool now creates a ConfigMap to store
the SSH config and known_hosts, and a Secret for a passwordless SSH key.

The PersistentVolumes are not destroyed at the end of the first job: they are
reused by the next ones and by the final deployment.

Using the Kubernetes API modules also helps with code reuse for a future
operator.

Note that the old PostgreSQL database cannot be moved to a separate
PersistentVolume: since we run `db_upgrade --link`, the old database is linked
by the new one and cannot be disposed of.
---
 go.mod                                        |  30 +-
 go.sum                                        |  94 ++++-
 mgradm/cmd/install/kubernetes/utils.go        |  11 +-
 .../cmd/migrate/kubernetes/dataExtractor.go   |  76 ++++
 mgradm/cmd/migrate/kubernetes/kubernetes.go   |  33 +-
 .../cmd/migrate/kubernetes/kubernetes_test.go |  10 +
 mgradm/cmd/migrate/kubernetes/migrationJob.go | 106 ++++++
 mgradm/cmd/migrate/kubernetes/ssh.go          | 166 +++++++++
 mgradm/cmd/migrate/kubernetes/ssl.go          |  51 +++
 mgradm/cmd/migrate/kubernetes/utils.go        | 331 +++++++++---------
 mgradm/shared/kubernetes/certificates.go      |  95 ++---
 mgradm/shared/kubernetes/coco.go              | 101 ++++++
 mgradm/shared/kubernetes/db.go                |  44 +++
 mgradm/shared/kubernetes/dbFinalize.go        |  64 ++++
 mgradm/shared/kubernetes/dbUpgradeJob.go      |  79 +++++
 mgradm/shared/kubernetes/deployment.go        | 277 +++++++++++++++
 mgradm/shared/kubernetes/flags.go             |   1 +
 mgradm/shared/kubernetes/hubApi.go            | 101 ++++++
 mgradm/shared/kubernetes/ingress.go           | 206 +++++++++++
 mgradm/shared/kubernetes/install.go           |  15 +-
 mgradm/shared/kubernetes/k3s.go               |   2 +-
 mgradm/shared/kubernetes/namespace.go         |  26 ++
 mgradm/shared/kubernetes/postUpgradeJob.go    |  39 +++
 mgradm/shared/kubernetes/services.go          | 112 ++++++
 mgradm/shared/kubernetes/traefik.go           | 127 +++++++
 mgradm/shared/kubernetes/traefik_test.go      |  82 +++++
 mgradm/shared/podman/podman.go                |   6 +-
 mgradm/shared/templates/issuerTemplate.go     |   6 +
 .../shared/templates/migrateScriptTemplate.go |  12 +-
 .../templates/pgsqlFinalizeScriptTemplate.go  |   9 +-
 .../pgsqlVersionUpgradeScriptTemplate.go      |  24 +-
 .../templates/reusedCaIssuerTemplate.go       |   8 +-
 mgradm/shared/templates/tlsSecret.go          |   4 +
 mgradm/shared/utils/cmd_utils.go              |  33 ++
 mgradm/shared/utils/exec.go                   |   2 -
 mgradm/shared/utils/types.go                  |  25 ++
 mgrpxy/cmd/install/kubernetes/utils.go        |   4 +-
 mgrpxy/shared/kubernetes/deploy.go            |   4 +-
 shared/kubernetes/apply.go                    |  62 ++++
 shared/kubernetes/converters.go               |  43 +++
 shared/kubernetes/job.go                      |  78 +++++
 shared/kubernetes/k3s.go                      |   4 +-
 shared/kubernetes/kubernetes.go               |  76 ++--
 shared/kubernetes/pod.go                      | 103 ++++++
 shared/kubernetes/pvc.go                      | 264
++++++++++++++ shared/kubernetes/pvc_test.go | 34 ++ shared/kubernetes/utils.go | 210 ++++++----- shared/kubernetes/waiters.go | 100 ++++++ shared/ssl/ssl.go | 32 +- shared/ssl/ssl_test.go | 3 +- shared/testutils/flagstests/mgradm.go | 30 +- shared/types/deployment.go | 2 + shared/utils/exec.go | 11 +- shared/utils/inspector.go | 7 + shared/utils/ports.go | 86 +++-- shared/utils/ports_test.go | 2 +- shared/utils/utils.go | 1 + shared/utils/volumes.go | 61 ++-- 58 files changed, 3160 insertions(+), 465 deletions(-) create mode 100644 mgradm/cmd/migrate/kubernetes/dataExtractor.go create mode 100644 mgradm/cmd/migrate/kubernetes/migrationJob.go create mode 100644 mgradm/cmd/migrate/kubernetes/ssh.go create mode 100644 mgradm/cmd/migrate/kubernetes/ssl.go create mode 100644 mgradm/shared/kubernetes/coco.go create mode 100644 mgradm/shared/kubernetes/db.go create mode 100644 mgradm/shared/kubernetes/dbFinalize.go create mode 100644 mgradm/shared/kubernetes/dbUpgradeJob.go create mode 100644 mgradm/shared/kubernetes/hubApi.go create mode 100644 mgradm/shared/kubernetes/ingress.go create mode 100644 mgradm/shared/kubernetes/namespace.go create mode 100644 mgradm/shared/kubernetes/postUpgradeJob.go create mode 100644 mgradm/shared/kubernetes/services.go create mode 100644 mgradm/shared/kubernetes/traefik.go create mode 100644 mgradm/shared/kubernetes/traefik_test.go create mode 100644 shared/kubernetes/apply.go create mode 100644 shared/kubernetes/converters.go create mode 100644 shared/kubernetes/job.go create mode 100644 shared/kubernetes/pod.go create mode 100644 shared/kubernetes/pvc.go create mode 100644 shared/kubernetes/pvc_test.go create mode 100644 shared/kubernetes/waiters.go diff --git a/go.mod b/go.mod index 1a2701df8..1888465a6 100644 --- a/go.mod +++ b/go.mod @@ -2,16 +2,38 @@ module github.com/uyuni-project/uyuni-tools go 1.21 +toolchain go1.21.1 + require ( github.com/Netflix/go-expect v0.0.0-20220104043353-73e0943537d2 github.com/briandowns/spinner v1.23.0 github.com/chai2010/gettext-go v1.0.2 github.com/spf13/cobra v1.8.0 + k8s.io/api v0.29.7 + k8s.io/apimachinery v0.29.7 + k8s.io/cli-runtime v0.29.7 ) require ( - github.com/creack/pty v1.1.17 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/creack/pty v1.1.18 // indirect github.com/fatih/color v1.7.0 // indirect + github.com/go-logr/logr v1.3.0 // indirect + github.com/gogo/protobuf v1.3.2 // indirect + github.com/google/gofuzz v1.2.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect + github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + golang.org/x/net v0.23.0 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + k8s.io/client-go v0.29.7 // indirect + k8s.io/klog/v2 v2.110.1 // indirect + k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + sigs.k8s.io/yaml v1.3.0 // indirect ) require ( @@ -30,9 +52,9 @@ require ( github.com/spf13/pflag v1.0.5 github.com/spf13/viper v1.7.0 github.com/subosito/gotenv v1.2.0 // indirect - golang.org/x/sys v0.22.0 // indirect - golang.org/x/term v0.22.0 - golang.org/x/text v0.16.0 // indirect + golang.org/x/sys v0.18.0 // indirect + golang.org/x/term v0.18.0 + golang.org/x/text 
v0.14.0 // indirect gopkg.in/ini.v1 v1.51.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v2 v2.4.0 diff --git a/go.sum b/go.sum index d4a59e229..1887571d4 100644 --- a/go.sum +++ b/go.sum @@ -11,6 +11,8 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= @@ -39,8 +41,9 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI= github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -55,10 +58,14 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= +github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -71,6 +78,12 @@ github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Z github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= +github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= @@ -108,19 +121,26 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0= +github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod 
h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE= github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= @@ -142,8 +162,14 @@ github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0Qu github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= +github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= @@ -166,6 +192,8 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= +github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.30.0 h1:SymVODrcRsaRaSInD9yQtKbtWqwsfoPcRff/oRXLj4c= github.com/rs/zerolog v1.30.0/go.mod h1:/tk+P47gFdPXq4QYjvCmT5/Gsug2nagsFWBWhAiSi1w= @@ -196,12 +224,15 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= 
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= @@ -213,6 +244,8 @@ golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -231,6 +264,8 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -245,6 +280,10 @@ golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -253,6 +292,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -266,19 +307,22 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= +golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod 
h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -298,7 +342,13 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -322,9 +372,12 @@ google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiq google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= @@ -333,6 +386,7 @@ gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod 
h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -342,4 +396,22 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +k8s.io/api v0.29.7 h1:Q2/thp7YYESgy0MGzxT9RvA/6doLJHBXSFH8GGLxSbc= +k8s.io/api v0.29.7/go.mod h1:mPimdbyuIjwoLtBEVIGVUYb4BKOE+44XHt/n4IqKsLA= +k8s.io/apimachinery v0.29.7 h1:ICXzya58Q7hyEEfnTrbmdfX1n1schSepX2KUfC2/ykc= +k8s.io/apimachinery v0.29.7/go.mod h1:i3FJVwhvSp/6n8Fl4K97PJEP8C+MM+aoDq4+ZJBf70Y= +k8s.io/cli-runtime v0.29.7 h1:6IxyxaIm3N31+PKXb1K7Tpf+100mm9hd9HMMYWMH2QE= +k8s.io/cli-runtime v0.29.7/go.mod h1:0pcclC4k3rkzYNAvw3zeiPNtg8Buv0orK+5MuhEKFBU= +k8s.io/client-go v0.29.7 h1:vTtiFrGBKlcBhxaeZC4eDrqui1e108nsTyue/KU63IY= +k8s.io/client-go v0.29.7/go.mod h1:69BvVqdRozgR/9TP45u/oO0tfrdbP+I8RqrcCJQshzg= +k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0= +k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= +k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= +sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= +sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= diff --git a/mgradm/cmd/install/kubernetes/utils.go b/mgradm/cmd/install/kubernetes/utils.go index 36fcc5e94..d44a60448 100644 --- a/mgradm/cmd/install/kubernetes/utils.go +++ b/mgradm/cmd/install/kubernetes/utils.go @@ -61,13 +61,12 @@ func installForKubernetes( // Deploy the SSL CA or server certificate if flags.Installation.SSL.UseExisting() { - if err := kubernetes.DeployExistingCertificate(&flags.Helm, &flags.Installation.SSL); err != nil { + if err := kubernetes.DeployExistingCertificate(flags.Helm.Uyuni.Namespace, &flags.Installation.SSL); err != nil { return err } } else { - sslArgs, err := kubernetes.DeployCertificate( - &flags.Helm, &flags.Installation.SSL, clusterInfos.GetKubeconfig(), fqdn, - flags.Image.PullPolicy, + sslArgs, err := kubernetes.DeployGeneratedCa( + &flags.Helm, &flags.Installation.SSL, clusterInfos.GetKubeconfig(), fqdn, flags.Image.PullPolicy, ) if err != nil { @@ -77,7 +76,9 @@ func installForKubernetes( } // Create a secret using SCC credentials if any are provided - helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Uyuni.Namespace, &flags.Installation.SCC) + helmArgs, err = shared_kubernetes.AddSCCSecret( + helmArgs, flags.Helm.Uyuni.Namespace, &flags.Installation.SCC, shared_kubernetes.ServerApp, + ) if err != nil { return err } diff --git a/mgradm/cmd/migrate/kubernetes/dataExtractor.go 
b/mgradm/cmd/migrate/kubernetes/dataExtractor.go new file mode 100644 index 000000000..330e0e243 --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/dataExtractor.go @@ -0,0 +1,76 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "errors" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + "gopkg.in/yaml.v2" +) + +// MigrationData represents the files and data extracted from the migration sync phase. +type MigrationData struct { + CaKey string + CaCert string + Data *utils.InspectResult + ServerCert string + ServerKey string +} + +func extractMigrationData( + namespace string, + image string, + pullPolicy string, + pullSecret string, + volume types.VolumeMount, +) (*MigrationData, error) { + // Run a pod reading the extracted data files from the volume. + // The data are written as a YAML dictionary where the key is the file name and the value its content. + out, err := kubernetes.RunPodLogs(namespace, "uyuni-data-extractor", image, + pullPolicy, pullSecret, []types.VolumeMount{volume}, + "sh", "-c", + "for f in /var/lib/uyuni-tools/*; do echo \"`basename $f`: |2\"; cat $f | sed 's/^/ /'; done", + ) + if err != nil { + return nil, err + } + + // Parse the content + files := make(map[string]string) + if err := yaml.Unmarshal(out, &files); err != nil { + return nil, utils.Errorf(err, L("failed to parse data extractor pod output")) + } + + var result MigrationData + for file, content := range files { + if file == "RHN-ORG-PRIVATE-SSL-KEY" { + result.CaKey = content + } else if file == "RHN-ORG-TRUSTED-SSL-CERT" { + result.CaCert = content + } else if file == "spacewalk.crt" { + result.ServerCert = content + } else if file == "spacewalk.key" { + result.ServerKey = content + } else if file == "data" { + parsedData, err := utils.ReadInspectDataString[utils.InspectResult]([]byte(content)) + if err != nil { + return nil, utils.Errorf(err, L("failed to parse migration data file")) + } + result.Data = parsedData + } + } + + if result.Data == nil { + return nil, errors.New(L("found no data file after migration")) + } + + return &result, nil +} diff --git a/mgradm/cmd/migrate/kubernetes/kubernetes.go b/mgradm/cmd/migrate/kubernetes/kubernetes.go index 48079bf13..6829c9308 100644 --- a/mgradm/cmd/migrate/kubernetes/kubernetes.go +++ b/mgradm/cmd/migrate/kubernetes/kubernetes.go @@ -18,7 +18,7 @@ import ( ) func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.KubernetesServerFlags]) *cobra.Command { - migrateCmd := &cobra.Command{ + cmd := &cobra.Command{ Use: "kubernetes [source server FQDN]", Short: L("Migrate a remote server to containers running on a kubernetes cluster"), Long: L(`Migrate a remote server to containers running on a kubernetes cluster @@ -26,15 +26,16 @@ func newCmd(globalFlags *types.GlobalFlags, run utils.CommandFunc[kubernetes.Kub This migration command assumes a few things: * the SSH configuration for the source server is complete, including user and all needed options to connect to the machine, - * an SSH agent is started and the key to use to connect to the server is added to it, * kubectl and helm are installed locally, * a working kubectl configuration should be set to connect to the cluster to deploy to +The SSH parameters may be left empty if the target Kubernetes namespace contains: + * 
an uyuni-migration-ssh ConfigMap with config and known_hosts items,
+  * an uyuni-migration-key secret with key and key.pub items with a passwordless key.
+
 When migrating a server with an automatically generated SSL Root CA certificate,
 the private key password will be required to convert it to RSA in a kubernetes secret.
 This is not needed if the source server does not have a generated SSL CA certificate.
-
-NOTE: migrating to a remote cluster is not supported yet!
 `),
 		Args: cobra.ExactArgs(1),
 		RunE: func(cmd *cobra.Command, args []string) error {
@@ -47,11 +48,27 @@ NOTE: migrating to a remote cluster is not supported yet!
 		},
 	}
 
-	shared.AddMigrateFlags(migrateCmd)
-	cmd_utils.AddHelmInstallFlag(migrateCmd)
-	migrateCmd.Flags().String("ssl-password", "", L("SSL CA generated private key password"))
+	shared.AddMigrateFlags(cmd)
+	cmd_utils.AddHelmInstallFlag(cmd)
+	cmd_utils.AddVolumesFlags(cmd)
+
+	cmd.Flags().String("ssl-password", "", L("SSL CA generated private key password"))
+
+	cmd.Flags().String("ssh-key-public", "", L("Path to the SSH public key to use to connect to the source server"))
+	cmd.Flags().String("ssh-key-private", "",
+		L("Path to the passwordless SSH private key to use to connect to the source server"),
+	)
+	cmd.Flags().String("ssh-knownhosts", "", L("Path to the SSH known_hosts file to use to connect to the source server"))
+	cmd.Flags().String("ssh-config", "", L("Path to the SSH configuration file to use to connect to the source server"))
+
+	const sshGroupID = "ssh"
+	_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: sshGroupID, Title: L("SSH Configuration Flags")})
+	_ = utils.AddFlagToHelpGroupID(cmd, "ssh-key-public", sshGroupID)
+	_ = utils.AddFlagToHelpGroupID(cmd, "ssh-key-private", sshGroupID)
+	_ = utils.AddFlagToHelpGroupID(cmd, "ssh-knownhosts", sshGroupID)
+	_ = utils.AddFlagToHelpGroupID(cmd, "ssh-config", sshGroupID)
 
-	return migrateCmd
+	return cmd
 }
 
 // NewCommand for kubernetes migration.
diff --git a/mgradm/cmd/migrate/kubernetes/kubernetes_test.go b/mgradm/cmd/migrate/kubernetes/kubernetes_test.go
index 1c846d63b..4e4bc44ba 100644
--- a/mgradm/cmd/migrate/kubernetes/kubernetes_test.go
+++ b/mgradm/cmd/migrate/kubernetes/kubernetes_test.go
@@ -21,6 +21,10 @@ func TestParamsParsing(t *testing.T) {
 		"--prepare",
 		"--user", "sudoer",
 		"--ssl-password", "sslsecret",
+		"--ssh-key-public", "path/ssh.pub",
+		"--ssh-key-private", "path/ssh",
+		"--ssh-knownhosts", "path/known_hosts",
+		"--ssh-config", "path/config",
 		"source.fq.dn",
 	}
 
@@ -31,6 +35,7 @@
 	args = append(args, flagstests.CocoFlagsTestArgs...)
 	args = append(args, flagstests.HubXmlrpcFlagsTestArgs...)
 	args = append(args, flagstests.ServerHelmFlagsTestArgs...)
+	args = append(args, flagstests.VolumesFlagsTestExpected...)
// Test function asserting that the args are properly parsed tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, @@ -45,7 +50,12 @@ func TestParamsParsing(t *testing.T) { flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.Migration.User) flagstests.AssertServerHelmFlags(t, &flags.Helm) + flagstests.AssertVolumesFlags(t, &flags.Volumes) testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.Installation.SSL.Password) + testutils.AssertEquals(t, "Error parsing --ssh-key-public", "path/ssh.pub", flags.SSH.Key.Public) + testutils.AssertEquals(t, "Error parsing --ssh-key-private", "path/ssh", flags.SSH.Key.Private) + testutils.AssertEquals(t, "Error parsing --ssh-knownhosts", "path/known_hosts", flags.SSH.Knownhosts) + testutils.AssertEquals(t, "Error parsing --ssh-config", "path/config", flags.SSH.Config) testutils.AssertEquals(t, "Wrong FQDN", "source.fq.dn", args[0]) return nil } diff --git a/mgradm/cmd/migrate/kubernetes/migrationJob.go b/mgradm/cmd/migrate/kubernetes/migrationJob.go new file mode 100644 index 000000000..36c984f44 --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/migrationJob.go @@ -0,0 +1,106 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const migrationJobName = "uyuni-data-sync" + +// Prepares and starts the synchronization job. +// +// This assumes the SSH key is stored in an uyuni-migration-key secret +// and the SSH config in an uyuni-migration-ssh ConfigMap with config and known_hosts keys. 
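+//
+// When they are missing, checkSSH creates both objects from the --ssh-* flags;
+// they can also be prepared manually, for instance:
+//
+//	kubectl create secret generic uyuni-migration-key -n <namespace> \
+//	    --from-file=key=id_rsa --from-file=key.pub=id_rsa.pub
+//	kubectl create configmap uyuni-migration-ssh -n <namespace> \
+//	    --from-file=config --from-file=known_hosts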
+func startMigrationJob( + namespace string, + serverImage string, + pullPolicy string, + pullSecret string, + fqdn string, + user string, + prepare bool, + mounts []types.VolumeMount, +) (string, error) { + job, err := getMigrationJob( + namespace, + serverImage, + pullPolicy, + pullSecret, + mounts, + fqdn, + user, + prepare, + ) + if err != nil { + return "", err + } + + // Run the job + return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the migration job")) +} + +func getMigrationJob( + namespace string, + image string, + pullPolicy string, + pullSecret string, + mounts []types.VolumeMount, + sourceFqdn string, + user string, + prepare bool, +) (*batch.Job, error) { + // Add mount and volume for the uyuni-migration-key secret with key and key.pub items + keyMount := core.VolumeMount{Name: "ssh-key", MountPath: "/root/.ssh/id_rsa", SubPath: "id_rsa"} + pubKeyMount := core.VolumeMount{Name: "ssh-key", MountPath: "/root/.ssh/id_rsa.pub", SubPath: "id_rsa.pub"} + + keyVolume := kubernetes.CreateSecretVolume("ssh-key", "uyuni-migration-key") + var keyMode int32 = 0600 + keyVolume.VolumeSource.Secret.Items = []core.KeyToPath{ + {Key: "key", Path: "id_rsa", Mode: &keyMode}, + {Key: "key.pub", Path: "id_rsa.pub"}, + } + + // Add mounts and volume for the uyuni-migration-ssh config map + // We need one mount for each file using subPath to not have 2 mounts on the same folder + knownHostsMount := core.VolumeMount{Name: "ssh-conf", MountPath: "/root/.ssh/known_hosts", SubPath: "known_hosts"} + sshConfMount := core.VolumeMount{Name: "ssh-conf", MountPath: "/root/.ssh/config", SubPath: "config"} + sshVolume := kubernetes.CreateConfigVolume("ssh-conf", "uyuni-migration-ssh") + + // Prepare the script + scriptData := templates.MigrateScriptTemplateData{ + Volumes: utils.ServerVolumeMounts, + SourceFqdn: sourceFqdn, + User: user, + Kubernetes: true, + Prepare: prepare, + } + + job, err := kubernetes.GetScriptJob(namespace, migrationJobName, image, pullPolicy, pullSecret, mounts, scriptData) + if err != nil { + return nil, err + } + + // Append the extra volumes and mounts + volumeMounts := job.Spec.Template.Spec.Containers[0].VolumeMounts + volumes := job.Spec.Template.Spec.Volumes + + volumeMounts = append(volumeMounts, keyMount, pubKeyMount, knownHostsMount, sshConfMount) + volumes = append(volumes, keyVolume, sshVolume) + + job.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts + job.Spec.Template.Spec.Volumes = volumes + + return job, nil +} diff --git a/mgradm/cmd/migrate/kubernetes/ssh.go b/mgradm/cmd/migrate/kubernetes/ssh.go new file mode 100644 index 000000000..a6a48ea0f --- /dev/null +++ b/mgradm/cmd/migrate/kubernetes/ssh.go @@ -0,0 +1,166 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "encoding/json" + "errors" + "fmt" + "os" + "strings" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + sshSecretName = "uyuni-migration-key" + sshConfigName = "uyuni-migration-ssh" +) + +func checkSSH(namespace string, flags *adm_utils.SSHFlags) error { + if exists, err := checkSSHKey(namespace); err != nil { + return err + } else if !exists && flags.Key.Public != "" && flags.Key.Private != "" { + if err := createSSHSecret(namespace, flags.Key.Private, flags.Key.Public); err != nil { + return err + } + } else if !exists { + return errors.New(L("no SSH key found to use for migration")) + } + + if exists, err := checkSSHConfig(namespace); err != nil { + return err + } else if !exists && flags.Knownhosts != "" { + // The config may be empty, but not the known_hosts + if err := createSSHConfig(namespace, flags.Config, flags.Knownhosts); err != nil { + return err + } + } else if !exists { + return errors.New(L("no SSH known_hosts and configuration found to use for migration")) + } + + return nil +} + +func checkSSHKey(namespace string) (bool, error) { + exists := false + out, err := utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "secret", "-n", namespace, sshSecretName, "-o", "jsonpath={.data}", + ) + if err != nil { + if strings.Contains(err.Error(), "NotFound") { + log.Debug().Msg("Not found!") + // The secret was not found, it's not really an error + return exists, nil + } + return exists, utils.Errorf(err, L("failed to get %s SSH key secret"), sshSecretName) + } + exists = true + + var data map[string]string + if err := json.Unmarshal(out, &data); err != nil { + return exists, err + } + + for _, key := range []string{"key", "key.pub"} { + if value, ok := data[key]; !ok || value == "" { + return exists, fmt.Errorf(L("%[1]s secret misses the %[2]s value"), sshSecretName, key) + } + } + + return exists, nil +} + +func createSSHSecret(namespace string, keyPath string, pubKeyPath string) error { + keyContent, err := os.ReadFile(keyPath) + if err != nil { + return utils.Errorf(err, L("failed to read key file %s"), keyPath) + } + + pubContent, err := os.ReadFile(pubKeyPath) + if err != nil { + return utils.Errorf(err, L("failed to read public key file %s"), pubKeyPath) + } + + secret := core.Secret{ + TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: sshSecretName, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + // It seems serializing this object automatically transforms the secrets to base64. 
+		Data: map[string][]byte{
+			"key":     keyContent,
+			"key.pub": pubContent,
+		},
+	}
+
+	return kubernetes.Apply([]runtime.Object{&secret}, L("failed to create the SSH migration secret"))
+}
+
+func checkSSHConfig(namespace string) (bool, error) {
+	exists := false
+	out, err := utils.RunCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "cm", "-n", namespace, sshConfigName, "-o", "jsonpath={.data}",
+	)
+	if err != nil {
+		if strings.Contains(err.Error(), "NotFound") {
+			// The config map was not found, it's not really an error
+			return exists, nil
+		}
+		return exists, utils.Errorf(err, L("failed to get %s SSH ConfigMap"), sshConfigName)
+	}
+	exists = true
+
+	var data map[string]string
+	if err := json.Unmarshal(out, &data); err != nil {
+		return exists, utils.Errorf(err, L("failed to parse SSH ConfigMap data"))
+	}
+
+	// The known_hosts has to contain at least the entry for the source server.
+	if value, ok := data["known_hosts"]; !ok || value == "" {
+		return exists, fmt.Errorf(L("%[1]s ConfigMap misses the %[2]s value"), sshConfigName, "known_hosts")
+	}
+
+	// An empty config is not an error.
+	if _, ok := data["config"]; !ok {
+		return exists, fmt.Errorf(L("%[1]s ConfigMap misses the %[2]s value"), sshConfigName, "config")
+	}
+
+	return exists, nil
+}
+
+func createSSHConfig(namespace string, configPath string, knownhostsPath string) error {
+	configContent, err := os.ReadFile(configPath)
+	if err != nil {
+		return utils.Errorf(err, L("failed to read SSH config file %s"), configPath)
+	}
+
+	knownhostsContent, err := os.ReadFile(knownhostsPath)
+	if err != nil {
+		return utils.Errorf(err, L("failed to read SSH known_hosts file %s"), knownhostsPath)
+	}
+
+	configMap := core.ConfigMap{
+		TypeMeta:   meta.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"},
+		ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: sshConfigName},
+		Data: map[string]string{
+			"config":      string(configContent),
+			"known_hosts": string(knownhostsContent),
+		},
+	}
+	return kubernetes.Apply([]runtime.Object{&configMap}, L("failed to create the SSH migration ConfigMap"))
+}
diff --git a/mgradm/cmd/migrate/kubernetes/ssl.go b/mgradm/cmd/migrate/kubernetes/ssl.go
new file mode 100644
index 000000000..9ff9bcdd1
--- /dev/null
+++ b/mgradm/cmd/migrate/kubernetes/ssl.go
@@ -0,0 +1,51 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"os"
+	"path"
+
+	"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
+	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
+	. 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +func installExistingCertificate(namespace string, extractedData *MigrationData) error { + // Store the certificates and key to file to load them + tmpDir, cleaner, err := utils.TempDir() + if err != nil { + return err + } + defer cleaner() + + caCrtPath := path.Join(tmpDir, "ca.crt") + if err := os.WriteFile(caCrtPath, []byte(extractedData.CaCert), 0700); err != nil { + return utils.Errorf(err, L("failed to create temporary ca.crt file")) + } + + srvCrtPath := path.Join(tmpDir, "srv.crt") + if err := os.WriteFile(srvCrtPath, []byte(extractedData.ServerCert), 0700); err != nil { + return utils.Errorf(err, L("failed to create temporary srv.crt file")) + } + + srvKeyPath := path.Join(tmpDir, "srv.key") + if err := os.WriteFile(srvKeyPath, []byte(extractedData.ServerKey), 0700); err != nil { + return utils.Errorf(err, L("failed to create temporary srv.key file")) + } + + sslFlags := adm_utils.InstallSSLFlags{ + Ca: types.CaChain{Root: caCrtPath}, + Server: types.SSLPair{ + Key: srvKeyPath, + Cert: srvCrtPath, + }, + } + return kubernetes.DeployExistingCertificate(namespace, &sslFlags) +} diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go index 4c073d047..12e7a6f7e 100644 --- a/mgradm/cmd/migrate/kubernetes/utils.go +++ b/mgradm/cmd/migrate/kubernetes/utils.go @@ -9,16 +9,9 @@ package kubernetes import ( "encoding/base64" "fmt" - "os/exec" - "path" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "github.com/spf13/cobra" - migration_shared "github.com/uyuni-project/uyuni-tools/mgradm/cmd/migrate/shared" "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" - adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - "github.com/uyuni-project/uyuni-tools/shared" shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes" . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/ssl" @@ -26,32 +19,33 @@ import ( "github.com/uyuni-project/uyuni-tools/shared/utils" ) +const migrationDataPvcName = "migration-data" + func migrateToKubernetes( _ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, _ *cobra.Command, args []string, ) error { - for _, binary := range []string{"kubectl", "helm"} { - if _, err := exec.LookPath(binary); err != nil { - return fmt.Errorf(L("install %s before running this command"), binary) - } + namespace := flags.Helm.Uyuni.Namespace + // Create the namespace if not present + if err := kubernetes.CreateNamespace(namespace); err != nil { + return err } - cnx := shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) - namespace, err := cnx.GetNamespace("") - if err != nil { - return utils.Errorf(err, L("failed retrieving namespace")) + + // Create the namespace if not present + if err := kubernetes.CreateNamespace(namespace); err != nil { + return err } - serverImage, err := utils.ComputeImage(flags.Image.Registry, utils.DefaultTag, flags.Image) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) + // Check the for the required SSH key and configuration + if err := checkSSH(namespace, &flags.SSH); err != nil { + return err } - hubXmlrpcImage := "" - hubXmlrpcImage, err = utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.HubXmlrpc.Image) + serverImage, err := utils.ComputeImage(flags.Image.Registry, utils.DefaultTag, flags.Image) if err != nil { - return err + return utils.Errorf(err, L("failed to compute image URL")) } fqdn := args[0] @@ -59,207 +53,212 @@ func migrateToKubernetes( return err } - // Find the SSH Socket and paths for the migration - sshAuthSocket := migration_shared.GetSSHAuthSocket() - sshConfigPath, sshKnownhostsPath := migration_shared.GetSSHPaths() + mounts := kubernetes.GetServerMounts() + mounts = kubernetes.TuneMounts(mounts, &flags.Volumes) - // Prepare the migration script and folder - scriptDir, cleaner, err := adm_utils.GenerateMigrationScript(fqdn, flags.Migration.User, true, flags.Migration.Prepare) - if err != nil { - return utils.Errorf(err, L("failed to generate migration script")) - } + // Add a mount and volume for the extracted data + migrationDataVolume := types.VolumeMount{Name: migrationDataPvcName, MountPath: "/var/lib/uyuni-tools"} + migrationMounts := append(mounts, migrationDataVolume) - defer cleaner() - - // We don't need the SSL certs at this point of the migration - clusterInfos, err := shared_kubernetes.CheckCluster() - if err != nil { + if err := shared_kubernetes.CreatePersistentVolumeClaims(namespace, migrationMounts); err != nil { return err } - kubeconfig := clusterInfos.GetKubeconfig() - - // Install Uyuni with generated CA cert: an empty struct means no 3rd party cert - helmArgs := []string{} // Create a secret using SCC credentials if any are provided - helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Uyuni.Namespace, &flags.Installation.SCC) + pullSecret, err := shared_kubernetes.GetSCCSecret( + flags.Helm.Uyuni.Namespace, &flags.Installation.SCC, shared_kubernetes.ServerApp, + ) if err != nil { return err } - // Deploy for running migration command - migrationArgs := append(helmArgs, - "--set", "migration.ssh.agentSocket="+sshAuthSocket, - "--set", "migration.ssh.configPath="+sshConfigPath, - "--set", "migration.ssh.knownHostsPath="+sshKnownhostsPath, - "--set", "migration.dataPath="+scriptDir, + jobName, err := 
startMigrationJob( + namespace, + serverImage, + flags.Image.PullPolicy, + pullSecret, + fqdn, + flags.Migration.User, + flags.Migration.Prepare, + migrationMounts, ) - - if err := kubernetes.Deploy( - cnx, flags.Image.Registry, &flags.Image, &flags.HubXmlrpc, - &flags.Helm, clusterInfos, fqdn, false, flags.Migration.Prepare, migrationArgs..., - ); err != nil { - return utils.Errorf(err, L("cannot run deploy")) - } - - // This is needed because folder with script needs to be mounted - // check the node before scaling down - nodeName, err := shared_kubernetes.GetNode(namespace, shared_kubernetes.ServerFilter) if err != nil { - return utils.Errorf(err, L("cannot find node running uyuni")) - } - // Run the actual migration - if err := adm_utils.RunMigration(cnx, "migrate.sh"); err != nil { - return utils.Errorf(err, L("cannot run migration")) + return err } - extractedData, err := utils.ReadInspectData[utils.InspectResult](path.Join(scriptDir, "data")) - if err != nil { - return utils.Errorf(err, L("cannot read data from container")) + // Wait for ever for the job to finish: the duration of this job depends on the amount of data to copy + if err := shared_kubernetes.WaitForJob(namespace, jobName, -1); err != nil { + return err } - // After each command we want to scale to 0 - err = shared_kubernetes.ReplicasTo(namespace, shared_kubernetes.ServerApp, 0) + // Read the extracted data from the migration volume + extractedData, err := extractMigrationData( + namespace, serverImage, flags.Image.PullPolicy, pullSecret, migrationDataVolume, + ) if err != nil { - return utils.Errorf(err, L("cannot set replicas to 0")) + return err } - if flags.Migration.Prepare { - log.Info().Msg(L("Migration prepared. Run the 'migrate' command without '--prepare' to finish the migration.")) - return nil - } + oldPgVersion := extractedData.Data.CurrentPgVersion + newPgVersion := extractedData.Data.ImagePgVersion - defer func() { - // if something is running, we don't need to set replicas to 1 - if _, err = shared_kubernetes.GetNode(namespace, shared_kubernetes.ServerFilter); err != nil { - err = shared_kubernetes.ReplicasTo(namespace, shared_kubernetes.ServerApp, 1) + // Run the DB Migration job if needed + if oldPgVersion < newPgVersion { + jobName, err := kubernetes.StartDBUpgradeJob( + namespace, flags.Image.Registry, flags.Image, flags.DBUpgradeImage, pullSecret, + oldPgVersion, newPgVersion, + ) + if err != nil { + return err } - }() - setupSSLArray, err := setupSSL( - &flags.Helm, kubeconfig, scriptDir, flags.Installation.SSL.Password, flags.Image.PullPolicy, + // Wait for ever for the job to finish: the duration of this job depends on the amount of data to upgrade + if err := shared_kubernetes.WaitForJob(namespace, jobName, -1); err != nil { + return err + } + } else if oldPgVersion > newPgVersion { + return fmt.Errorf( + L("downgrading database from PostgreSQL %[1]d to %[2]d is not supported"), oldPgVersion, newPgVersion) + } + + // Run the DB Finalization job + schemaUpdateRequired := oldPgVersion != newPgVersion + jobName, err = kubernetes.StartDBFinalizeJob( + namespace, serverImage, flags.Image.PullPolicy, pullSecret, schemaUpdateRequired, true, ) if err != nil { - return utils.Errorf(err, L("cannot setup SSL")) + return err } - helmArgs = append(helmArgs, - "--reset-values", - "--set", "timezone="+extractedData.Timezone, - ) - if flags.Mirror != "" { - log.Warn().Msgf(L("The mirror data will not be migrated, ensure it is available at %s"), flags.Mirror) - // TODO Handle claims for multi-node clusters - 
helmArgs = append(helmArgs, "--set", "mirror.hostPath="+flags.Mirror) + // Wait for ever for the job to finish: the duration of this job depends on the amount of data to reindex + if err := shared_kubernetes.WaitForJob(namespace, jobName, -1); err != nil { + return err } - helmArgs = append(helmArgs, setupSSLArray...) - // Run uyuni upgrade using the new ssl certificate - // We don't need to start the Hub XML-RPC API containers during the setup phase - if err = kubernetes.UyuniUpgrade( - serverImage, flags.Image.PullPolicy, 0, hubXmlrpcImage, &flags.Helm, - kubeconfig, fqdn, clusterInfos.Ingress, helmArgs..., - ); err != nil { - return utils.Errorf(err, L("cannot upgrade helm chart to image %s using new SSL certificate"), serverImage) + // Run the Post Upgrade job + jobName, err = kubernetes.StartPostUpgradeJob(namespace, serverImage, flags.Image.PullPolicy, pullSecret) + if err != nil { + return err } - if err := shared_kubernetes.WaitForDeployment(namespace, "uyuni", "uyuni"); err != nil { - return utils.Errorf(err, L("cannot wait for deployment of %s"), serverImage) + if err := shared_kubernetes.WaitForJob(namespace, jobName, 60); err != nil { + return err } - err = shared_kubernetes.ReplicasTo(namespace, shared_kubernetes.ServerApp, 0) + // Extract some data from the cluster to guess how to configure Uyuni. + clusterInfos, err := shared_kubernetes.CheckCluster() if err != nil { - return utils.Errorf(err, L("cannot set replicas to 0")) + return err } - oldPgVersion := extractedData.CurrentPgVersion - newPgVersion := extractedData.ImagePgVersion + // Install the traefik / nginx config on the node + // This will never be done in an operator. + needsHub := flags.HubXmlrpc.Replicas > 0 + if err := kubernetes.DeployNodeConfig(namespace, clusterInfos, needsHub, extractedData.Data.Debug); err != nil { + return err + } - if oldPgVersion != newPgVersion { - if err := kubernetes.RunPgsqlVersionUpgrade(flags.Image.Registry, flags.Image, - flags.DBUpgradeImage, namespace, nodeName, oldPgVersion, newPgVersion, - ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL version upgrade script")) + // Deploy the SSL CA and server certificates + var caIssuer string + if extractedData.CaKey != "" { + // cert-manager is not required for 3rd party certificates, only if we have the CA key. + // Note that in an operator we won't be able to install cert-manager and just wait for it to be installed. + kubeconfig := clusterInfos.GetKubeconfig() + + if err := kubernetes.InstallCertManager(&flags.Helm, kubeconfig, flags.Image.PullPolicy); err != nil { + return utils.Errorf(err, L("cannot install cert manager")) } - } - schemaUpdateRequired := oldPgVersion != newPgVersion - if err := kubernetes.RunPgsqlFinalizeScript( - serverImage, flags.Image.PullPolicy, namespace, nodeName, schemaUpdateRequired, true, - ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL finalisation script")) - } + // Convert CA to RSA to use in a Kubernetes TLS secret. + // In an operator we would have to fail now if there is no SSL password as we cannot prompt it. 
+ ca := types.SSLPair{ + Key: base64.StdEncoding.EncodeToString( + ssl.GetRsaKey(extractedData.CaKey, flags.Installation.SSL.Password), + ), + Cert: base64.StdEncoding.EncodeToString(ssl.StripTextFromCertificate(extractedData.CaCert)), + } - if err := kubernetes.RunPostUpgradeScript(serverImage, flags.Image.PullPolicy, namespace, nodeName); err != nil { - return utils.Errorf(err, L("cannot run post upgrade script")) + // Install the cert-manager issuers + if _, err := kubernetes.DeployReusedCa(namespace, &ca); err != nil { + return err + } + caIssuer = shared_kubernetes.CaIssuerName + } else { + // Most likely a 3rd party certificate: cert-manager is not needed in this case + if err := installExistingCertificate(namespace, extractedData); err != nil { + return err + } } - hubReplicas := flags.HubXmlrpc.Replicas - if extractedData.HasHubXmlrpcAPI { - log.Info().Msg(L("Enabling Hub XML-RPC API since it is enabled on the migrated server")) - hubReplicas = 1 + // Create the Ingress routes before the deployments as those are triggering + // the creation of the uyuni-cert secret from cert-manager. + if err := kubernetes.CreateIngress(namespace, fqdn, caIssuer, clusterInfos.Ingress); err != nil { + return err } - // This is the final deployment, all the replicas need to be correct here. - if err := kubernetes.UyuniUpgrade( - serverImage, flags.Image.PullPolicy, hubReplicas, hubXmlrpcImage, &flags.Helm, kubeconfig, - fqdn, clusterInfos.Ingress, helmArgs..., + // Wait for uyuni-cert secret to be ready + shared_kubernetes.WaitForSecret(namespace, kubernetes.CertSecretName) + + deploymentsStarting := []string{kubernetes.ServerDeployName} + // Start the server + if err := kubernetes.CreateServerDeployment( + namespace, serverImage, flags.Image.PullPolicy, extractedData.Data.Timezone, extractedData.Data.Debug, + flags.Volumes.Mirror, pullSecret, ); err != nil { - return utils.Errorf(err, L("cannot upgrade to image %s"), serverImage) + return err } - if err := shared_kubernetes.WaitForDeployment(namespace, "uyuni", "uyuni"); err != nil { + // Create the services + if err := kubernetes.CreateServices(namespace, extractedData.Data.Debug); err != nil { return err } - // Reinitialize the connection since the pod name has changed since we first checked - cnx = shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) - if err := cnx.CopyCaCertificate(fqdn); err != nil { - return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates")) + if clusterInfos.Ingress == "traefik" { + // Create the Traefik routes + if err := kubernetes.CreateTraefikRoutes(namespace, needsHub, extractedData.Data.Debug); err != nil { + return err + } + } + + // Store the extracted DB credentials in a secret. + if err := kubernetes.CreateDBSecret( + namespace, kubernetes.DBSecret, extractedData.Data.DBUser, extractedData.Data.DBPassword, + ); err != nil { + return err } - return nil -} -// updateIssuer replaces the temporary SSL certificate issuer with the source server CA. -// Return additional helm args to use the SSL certificates. 
-func setupSSL( - helm *adm_utils.HelmFlags, - kubeconfig string, - scriptDir string, - password string, - pullPolicy string) ([]string, - error, -) { - caCert := path.Join(scriptDir, "RHN-ORG-TRUSTED-SSL-CERT") - caKey := path.Join(scriptDir, "RHN-ORG-PRIVATE-SSL-KEY") - - if utils.FileExists(caCert) && utils.FileExists(caKey) { - key := base64.StdEncoding.EncodeToString(ssl.GetRsaKey(caKey, password)) - - // Strip down the certificate text part - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "openssl", "x509", "-in", caCert) + // Start the Coco Deployments if requested. + if flags.Coco.Replicas > 0 { + cocoImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.Coco.Image) if err != nil { - return []string{}, utils.Errorf(err, L("failed to strip text part from CA certificate")) + return err } - cert := base64.StdEncoding.EncodeToString(out) - ca := types.SSLPair{Cert: cert, Key: key} + if err := kubernetes.StartCocoDeployment( + namespace, cocoImage, flags.Image.PullPolicy, pullSecret, flags.Coco.Replicas, + extractedData.Data.DBPort, extractedData.Data.DBName, + ); err != nil { + return err + } + deploymentsStarting = append(deploymentsStarting, kubernetes.CocoDeployName) + } - ret, err := kubernetes.DeployReusedCa(helm, &ca, kubeconfig, pullPolicy) + // In an operator mind, the user would just change the custom resource to enable the feature. + if extractedData.Data.HasHubXmlrpcAPI { + // Install Hub API deployment, service + hubAPIImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.HubXmlrpc.Image) if err != nil { - return []string{}, utils.Errorf(err, L("cannot deploy certificate")) + return err } - return ret, nil - } - // Handle third party certificates and CA - sslFlags := adm_utils.InstallSSLFlags{ - Ca: types.CaChain{Root: caCert}, - Server: types.SSLPair{ - Key: path.Join(scriptDir, "spacewalk.key"), - Cert: path.Join(scriptDir, "spacewalk.crt"), - }, + if err := kubernetes.InstallHubAPI(namespace, hubAPIImage, flags.Image.PullPolicy, pullSecret); err != nil { + return err + } + deploymentsStarting = append(deploymentsStarting, kubernetes.HubAPIDeployName) } - if err := kubernetes.DeployExistingCertificate(helm, &sslFlags); err != nil { - return []string{}, nil + + // Wait for all the deployments to be ready + if err := shared_kubernetes.WaitForDeployments(namespace, deploymentsStarting...); err != nil { + return err } - return []string{}, nil + + return nil } diff --git a/mgradm/shared/kubernetes/certificates.go b/mgradm/shared/kubernetes/certificates.go index 7dd36936c..28e925df0 100644 --- a/mgradm/shared/kubernetes/certificates.go +++ b/mgradm/shared/kubernetes/certificates.go @@ -2,11 +2,14 @@ // // SPDX-License-Identifier: Apache-2.0 +//go:build !nok8s + package kubernetes import ( "encoding/base64" "errors" + "fmt" "os" "path/filepath" "time" @@ -20,16 +23,20 @@ import ( "github.com/uyuni-project/uyuni-tools/shared/ssl" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" + + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" ) // Helm annotation to add in order to use cert-manager's uyuni CA issuer, in JSON format. 
-const ingressCertManagerAnnotation = "ingressSSLAnnotations={\"cert-manager.io/issuer\": \"uyuni-ca-issuer\"}" +var ingressCertManagerAnnotation = fmt.Sprintf( + "ingressSSLAnnotations={\"cert-manager.io/issuer\": \"%s\"}", + kubernetes.CaIssuerName, +) // DeployExistingCertificate execute a deploy of an existing certificate. -func DeployExistingCertificate( - helmFlags *cmd_utils.HelmFlags, - sslFlags *cmd_utils.InstallSSLFlags, -) error { +func DeployExistingCertificate(namespace string, sslFlags *cmd_utils.InstallSSLFlags) error { // Deploy the SSL Certificate secret and CA configmap serverCrt, rootCaCrt := ssl.OrderCas(&sslFlags.Ca, &sslFlags.Server) serverKey := utils.ReadFile(sslFlags.Server.Key) @@ -43,8 +50,8 @@ func DeployExistingCertificate( secretPath := filepath.Join(tempDir, "secret.yaml") log.Info().Msg(L("Creating SSL server certificate secret")) tlsSecretData := templates.TLSSecretTemplateData{ - Namespace: helmFlags.Uyuni.Namespace, - Name: "uyuni-cert", + Namespace: namespace, + Name: CertSecretName, Certificate: base64.StdEncoding.EncodeToString(serverCrt), Key: base64.StdEncoding.EncodeToString(serverKey), RootCa: base64.StdEncoding.EncodeToString(rootCaCrt), @@ -59,24 +66,13 @@ func DeployExistingCertificate( } // Copy the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - createCaConfig(helmFlags.Uyuni.Namespace, rootCaCrt) - return nil + return createCaConfig(namespace, rootCaCrt) } -// DeployReusedCaCertificate deploys an existing SSL CA using cert-manager. -func DeployReusedCa( - helmFlags *cmd_utils.HelmFlags, - ca *types.SSLPair, - kubeconfig string, - imagePullPolicy string, -) ([]string, error) { +// DeployReusedCa deploys an existing SSL CA using an already installed cert-manager. +func DeployReusedCa(namespace string, ca *types.SSLPair) ([]string, error) { helmArgs := []string{} - // Install cert-manager if needed - if err := installCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil { - return []string{}, utils.Errorf(err, L("cannot install cert manager")) - } - log.Info().Msg(L("Creating cert-manager issuer for existing CA")) tempDir, cleaner, err := utils.TempDir() if err != nil { @@ -87,7 +83,7 @@ func DeployReusedCa( issuerPath := filepath.Join(tempDir, "issuer.yaml") issuerData := templates.ReusedCaIssuerTemplateData{ - Namespace: helmFlags.Uyuni.Namespace, + Namespace: namespace, Key: ca.Key, Certificate: ca.Cert, } @@ -102,19 +98,21 @@ func DeployReusedCa( } // Wait for issuer to be ready - if err := waitForIssuer(helmFlags.Uyuni.Namespace, "uyuni-ca-issuer"); err != nil { + if err := waitForIssuer(namespace, kubernetes.CaIssuerName); err != nil { return nil, err } helmArgs = append(helmArgs, "--set-json", ingressCertManagerAnnotation) // Copy the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - createCaConfig(helmFlags.Uyuni.Namespace, []byte(ca.Cert)) + if err := createCaConfig(namespace, []byte(ca.Cert)); err != nil { + return nil, err + } return helmArgs, nil } // DeployGenerateCa deploys a new SSL CA using cert-manager. 
-func DeployCertificate( +func DeployGeneratedCa( helmFlags *cmd_utils.HelmFlags, sslFlags *cmd_utils.InstallSSLFlags, kubeconfig string, @@ -124,7 +122,7 @@ func DeployCertificate( helmArgs := []string{} // Install cert-manager if needed - if err := installCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil { + if err := InstallCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil { return []string{}, utils.Errorf(err, L("cannot install cert manager")) } @@ -164,7 +162,9 @@ func DeployCertificate( helmArgs = append(helmArgs, "--set-json", ingressCertManagerAnnotation) // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - extractCaCertToConfig(helmFlags.Uyuni.Namespace) + if err := extractCaCertToConfig(helmFlags.Uyuni.Namespace); err != nil { + return nil, err + } return helmArgs, nil } @@ -186,8 +186,11 @@ func waitForIssuer(namespace string, name string) error { return errors.New(L("Issuer didn't turn ready after 60s")) } -func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, imagePullPolicy string) error { - if !kubernetes.IsDeploymentReady("", "cert-manager") { +// InstallCertManager deploys the cert-manager helm chart with the CRDs. +func InstallCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, imagePullPolicy string) error { + if ready, err := kubernetes.IsDeploymentReady("", "cert-manager"); err != nil { + return err + } else if !ready { log.Info().Msg(L("Installing cert-manager")) repo := "" chart := helmFlags.CertManager.Chart @@ -198,7 +201,7 @@ func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, image "--set", "crds.enabled=true", "--set", "crds.keep=true", "--set-json", "global.commonLabels={\"installedby\": \"mgradm\"}", - "--set", "image.pullPolicy=" + kubernetes.GetPullPolicy(imagePullPolicy), + "--set", "image.pullPolicy=" + string(kubernetes.GetPullPolicy(imagePullPolicy)), } extraValues := helmFlags.CertManager.Values if extraValues != "" { @@ -219,7 +222,7 @@ func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, image } // Wait for cert-manager to be ready - err := kubernetes.WaitForDeployment("", "cert-manager-webhook", "webhook") + err := kubernetes.WaitForDeployments("", "cert-manager-webhook") if err != nil { return utils.Errorf(err, L("cannot deploy")) } @@ -227,7 +230,7 @@ func installCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, image return nil } -func extractCaCertToConfig(namespace string) { +func extractCaCertToConfig(namespace string) error { // TODO Replace with [trust-manager](https://cert-manager.io/docs/projects/trust-manager/) to automate this const jsonPath = "-o=jsonpath={.data.ca\\.crt}" @@ -239,25 +242,35 @@ func extractCaCertToConfig(namespace string) { log.Info().Msgf(L("CA cert: %s"), string(out)) if err == nil && len(out) > 0 { log.Info().Msg(L("uyuni-ca configmap already existing, skipping extraction")) - return + return nil } - out, err = utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "get", "secret", "uyuni-ca", jsonPath, "-n", namespace) + out, err = utils.RunCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "secret", "-n", namespace, "uyuni-ca", jsonPath, + ) if err != nil { - log.Fatal().Err(err).Msgf(L("Failed to get uyuni-ca certificate")) + return utils.Errorf(err, L("Failed to get uyuni-ca certificate")) } decoded, err := base64.StdEncoding.DecodeString(string(out)) if err != nil { - log.Fatal().Err(err).Msgf(L("Failed to base64 decode CA certificate")) + return 
utils.Errorf(err, L("Failed to base64 decode CA certificate")) } - createCaConfig(namespace, decoded) + return createCaConfig(namespace, decoded) } -func createCaConfig(namespace string, ca []byte) { - valueArg := "--from-literal=ca.crt=" + string(ca) - if err := utils.RunCmd("kubectl", "create", "configmap", "uyuni-ca", valueArg, "-n", namespace); err != nil { - log.Fatal().Err(err).Msg(L("Failed to create uyuni-ca config map from certificate")) +func createCaConfig(namespace string, ca []byte) error { + configMap := core.ConfigMap{ + TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "ConfigMap"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: "uyuni-ca", + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + Data: map[string]string{ + "ca.crt": string(ca), + }, } + return kubernetes.Apply([]runtime.Object{&configMap}, L("failed to create the SSH migration ConfigMap")) } diff --git a/mgradm/shared/kubernetes/coco.go b/mgradm/shared/kubernetes/coco.go new file mode 100644 index 000000000..e40ee6b64 --- /dev/null +++ b/mgradm/shared/kubernetes/coco.go @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "fmt" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // CocoApiDeployName is the deployment name for confidential computing attestations. + CocoDeployName = "uyuni-coco-attestation" +) + +// StartCocoDeployment installs the confidential computing deployment. 
+func StartCocoDeployment( + namespace string, + image string, + pullPolicy string, + pullSecret string, + replicas int, + dbPort int, + dbName string, +) error { + deploy := getCocoDeployment(namespace, image, pullPolicy, pullSecret, int32(replicas), dbPort, dbName) + return kubernetes.Apply([]runtime.Object{deploy}, + L("failed to create confidential computing attestations deployment"), + ) +} + +func getCocoDeployment( + namespace string, + image string, + pullPolicy string, + pullSecret string, + replicas int32, + dbPort int, + dbName string, +) *apps.Deployment { + cnxURL := fmt.Sprintf("jdbc:postgresql://%s:%d/%s", utils.DBServiceName, dbPort, dbName) + deploy := &apps.Deployment{ + TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: CocoDeployName, + Namespace: namespace, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.CocoComponent), + }, + Spec: apps.DeploymentSpec{ + Replicas: &replicas, + Selector: &meta.LabelSelector{ + MatchLabels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.CocoComponent), + }, + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.CocoComponent), + }, + Spec: core.PodSpec{ + Containers: []core.Container{ + { + Name: "coco", + Image: image, + ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy), + Env: []core.EnvVar{ + {Name: "database_connection", Value: cnxURL}, + {Name: "database_user", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: DBSecret}, + Key: secretUsername, + }, + }}, + {Name: "database_password", ValueFrom: &core.EnvVarSource{ + SecretKeyRef: &core.SecretKeySelector{ + LocalObjectReference: core.LocalObjectReference{Name: DBSecret}, + Key: secretPassword, + }, + }}, + }, + }, + }, + }, + }, + }, + } + + if pullSecret != "" { + deploy.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}} + } + + return deploy +} diff --git a/mgradm/shared/kubernetes/db.go b/mgradm/shared/kubernetes/db.go new file mode 100644 index 000000000..7a60903ab --- /dev/null +++ b/mgradm/shared/kubernetes/db.go @@ -0,0 +1,44 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // DBSecret is the name of the database credentials secret. + DBSecret = "db-credentials" + // ReportdbSecret is the name of the report database credentials secret. + ReportdbSecret = "reportdb-credentials" + secretUsername = "username" + secretPassword = "password" +) + +// CreateDBSecret creates a secret containing the DB credentials. +func CreateDBSecret(namespace string, name string, user string, password string) error { + secret := core.Secret{ + TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: name, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent), + }, + // It seems serializing this object automatically transforms the secrets to base64. 
+ Data: map[string][]byte{ + secretUsername: []byte(user), + secretPassword: []byte(password), + }, + Type: core.SecretTypeBasicAuth, + } + + return kubernetes.Apply([]runtime.Object{&secret}, L("failed to create the database secret")) +} diff --git a/mgradm/shared/kubernetes/dbFinalize.go b/mgradm/shared/kubernetes/dbFinalize.go new file mode 100644 index 000000000..37f2c2133 --- /dev/null +++ b/mgradm/shared/kubernetes/dbFinalize.go @@ -0,0 +1,64 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/rs/zerolog/log" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + batch "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DBFinalizeJobName is the name of the Database finalization job. +const DBFinalizeJobName = "uyuni-db-finalize" + +// StartDBFinalizeJob starts the database finalization job. +func StartDBFinalizeJob( + namespace string, + serverImage string, + pullPolicy string, + pullSecret string, + schemaUpdateRequired bool, + migration bool, +) (string, error) { + log.Info().Msg(L("Running database finalization, this could be long depending on the size of the database…")) + job, err := getDBFinalizeJob(namespace, serverImage, pullPolicy, pullSecret, schemaUpdateRequired, migration) + if err != nil { + return "", err + } + + return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the database finalization job")) +} + +func getDBFinalizeJob( + namespace string, + image string, + pullPolicy string, + pullSecret string, + schemaUpdateRequired bool, + migration bool, +) (*batch.Job, error) { + mounts := []types.VolumeMount{ + {MountPath: "/var/lib/pgsql", Name: "var-pgsql"}, + {MountPath: "/etc/rhn", Name: "etc-rhn"}, + } + + // Prepare the script + scriptData := templates.FinalizePostgresTemplateData{ + RunAutotune: true, + RunReindex: true, + RunSchemaUpdate: schemaUpdateRequired, + Migration: migration, + Kubernetes: true, + } + + return kubernetes.GetScriptJob(namespace, DBFinalizeJobName, image, pullPolicy, pullSecret, mounts, scriptData) +} diff --git a/mgradm/shared/kubernetes/dbUpgradeJob.go b/mgradm/shared/kubernetes/dbUpgradeJob.go new file mode 100644 index 000000000..014aa98c3 --- /dev/null +++ b/mgradm/shared/kubernetes/dbUpgradeJob.go @@ -0,0 +1,79 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "fmt" + + "github.com/rs/zerolog/log" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + batch "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// DBUpgradeJobName is the name of the database upgrade job. +const DBUpgradeJobName = "uyuni-db-upgrade" + +// StartDBUpgradeJob starts the database upgrade job. 
+func StartDBUpgradeJob( + namespace string, + registry string, + image types.ImageFlags, + migrationImage types.ImageFlags, + pullSecret string, + oldPgsql string, + newPgsql string, +) (string, error) { + log.Info().Msgf(L("Upgrading PostgreSQL database from %[1]s to %[2]s…"), oldPgsql, newPgsql) + + var migrationImageURL string + var err error + if migrationImage.Name == "" { + imageName := fmt.Sprintf("-migration-%s-%s", oldPgsql, newPgsql) + migrationImageURL, err = utils.ComputeImage(registry, image.Tag, image, imageName) + } else { + migrationImageURL, err = utils.ComputeImage(registry, image.Tag, migrationImage) + } + if err != nil { + return "", utils.Errorf(err, L("failed to compute image URL")) + } + + log.Info().Msgf(L("Using database upgrade image %s"), migrationImageURL) + + job, err := getDBUpgradeJob(namespace, migrationImageURL, image.PullPolicy, pullSecret, oldPgsql, newPgsql) + if err != nil { + return "", err + } + + return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the database upgrade job")) +} + +func getDBUpgradeJob( + namespace string, + image string, + pullPolicy string, + pullSecret string, + oldPgsql string, + newPgsql string, +) (*batch.Job, error) { + mounts := []types.VolumeMount{ + {MountPath: "/var/lib/pgsql", Name: "var-pgsql"}, + } + + // Prepare the script + scriptData := templates.PostgreSQLVersionUpgradeTemplateData{ + OldVersion: oldPgsql, + NewVersion: newPgsql, + } + + return kubernetes.GetScriptJob(namespace, DBUpgradeJobName, image, pullPolicy, pullSecret, mounts, scriptData) +} diff --git a/mgradm/shared/kubernetes/deployment.go b/mgradm/shared/kubernetes/deployment.go index 9347fb3ae..83e6b3f9a 100644 --- a/mgradm/shared/kubernetes/deployment.go +++ b/mgradm/shared/kubernetes/deployment.go @@ -12,11 +12,288 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/uyuni-project/uyuni-tools/shared/utils" + + cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" ) // ServerDeployName is the name of the server deployment. const ServerDeployName = "uyuni" +// CreateServerDeployment creates a new deployment of the server. 
+func CreateServerDeployment( + namespace string, + serverImage string, + pullPolicy string, + timezone string, + debug bool, + mirrorPvName string, + pullSecret string, +) error { + if mirrorPvName != "" { + // Create a PVC using the required mirror PV + if err := kubernetes.CreatePersistentVolumeClaimForVolume(namespace, mirrorPvName); err != nil { + return err + } + } + + serverDeploy := getServerDeployment( + namespace, serverImage, kubernetes.GetPullPolicy(pullPolicy), timezone, debug, mirrorPvName, pullSecret, + ) + + return kubernetes.Apply([]runtime.Object{serverDeploy}, L("failed to create the server deployment")) +} + +func getServerDeployment( + namespace string, + image string, + pullPolicy core.PullPolicy, + timezone string, + debug bool, + mirrorPvName string, + pullSecret string, +) *apps.Deployment { + var replicas int32 = 1 + + envs := []core.EnvVar{ + {Name: "TZ", Value: timezone}, + } + + mounts := GetServerMounts() + + // Convert our mounts to Kubernetes objects + volumeMounts := kubernetes.ConvertVolumeMounts(mounts) + + // The init mounts are the same mounts but in /mnt just for the init container populating the volumes + var initMounts []core.VolumeMount + for _, mount := range volumeMounts { + initMount := mount.DeepCopy() + initMount.MountPath = "/mnt" + initMount.MountPath + initMounts = append(initMounts, *initMount) + } + + if mirrorPvName != "" { + // Add a mount for the mirror + mounts = append(mounts, types.VolumeMount{MountPath: "/mirror", Name: mirrorPvName}) + + // Add the environment variable for the deployment to use the mirror + // This doesn't makes sense for migration as the setup script is not executed + envs = append(envs, core.EnvVar{Name: "MIRROR_PATH", Value: "/mirror"}) + } + + volumes := kubernetes.CreateVolumes(mounts) + + runMount, runVolume := kubernetes.CreateTmpfsMount("/run", "256Mi") + cgroupMount, cgroupVolume := kubernetes.CreateHostPathMount( + "/sys/fs/cgroup", "/sys/fs/cgroup", core.HostPathDirectory, + ) + + caMount := core.VolumeMount{ + Name: "ca-cert", + MountPath: "/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT", + ReadOnly: true, + SubPath: "ca.crt", + } + tlsKeyMount := core.VolumeMount{Name: "tls-key", MountPath: "/etc/pki/spacewalk-tls"} + + caVolume := kubernetes.CreateConfigVolume("ca-cert", "uyuni-ca") + tlsKeyVolume := kubernetes.CreateSecretVolume("tls-key", "uyuni-cert") + var keyMode int32 = 0400 + tlsKeyVolume.VolumeSource.Secret.Items = []core.KeyToPath{ + {Key: "tls.crt", Path: "spacewalk.crt"}, + {Key: "tls.key", Path: "spacewalk.key", Mode: &keyMode}, + } + + initMounts = append(initMounts, tlsKeyMount) + volumeMounts = append(volumeMounts, runMount, cgroupMount, caMount, tlsKeyMount) + volumes = append(volumes, runVolume, cgroupVolume, caVolume, tlsKeyVolume) + + // Compute the needed ports + ports := utils.GetServerPorts(debug) + + deployment := apps.Deployment{ + TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: ServerDeployName, + Namespace: namespace, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent), + }, + Spec: apps.DeploymentSpec{ + Replicas: &replicas, + // As long as the container cannot scale, we need to stick to recreate strategy + // or the new deployed pods won't be ready. 
+ Strategy: apps.DeploymentStrategy{Type: apps.RecreateDeploymentStrategyType}, + Selector: &meta.LabelSelector{ + MatchLabels: map[string]string{kubernetes.ComponentLabel: kubernetes.ServerComponent}, + }, + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.ServerComponent), + }, + Spec: core.PodSpec{ + InitContainers: []core.Container{ + { + Name: "init-volumes", + Image: image, + ImagePullPolicy: pullPolicy, + Command: []string{"sh", "-x", "-c", initScript}, + VolumeMounts: initMounts, + }, + }, + Containers: []core.Container{ + { + Name: "uyuni", + Image: image, + ImagePullPolicy: pullPolicy, + Lifecycle: &core.Lifecycle{ + PreStop: &core.LifecycleHandler{ + Exec: &core.ExecAction{ + Command: []string{"/bin/sh", "-c", "spacewalk-service stop && systemctl stop postgresql"}, + }, + }, + }, + Ports: kubernetes.ConvertPortMaps(ports), + Env: envs, + ReadinessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Port: intstr.FromInt(80), + Path: "/rhn/metrics", + }, + }, + PeriodSeconds: 30, + TimeoutSeconds: 20, + FailureThreshold: 5, + }, + LivenessProbe: &core.Probe{ + ProbeHandler: core.ProbeHandler{ + HTTPGet: &core.HTTPGetAction{ + Port: intstr.FromInt(80), + Path: "/rhn/metrics", + }, + }, + InitialDelaySeconds: 60, + PeriodSeconds: 60, + TimeoutSeconds: 20, + FailureThreshold: 5, + }, + VolumeMounts: volumeMounts, + }, + }, + Volumes: volumes, + }, + }, + }, + } + + if pullSecret != "" { + deployment.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}} + } + + return &deployment +} + +const initScript = ` +# Fill he empty volumes +for vol in /var/lib/cobbler \ + /var/lib/salt \ + /var/lib/pgsql \ + /var/cache \ + /var/log \ + /srv/salt \ + /srv/www \ + /srv/tftpboot \ + /srv/formula_metadata \ + /srv/pillar \ + /srv/susemanager \ + /srv/spacewalk \ + /root \ + /etc/apache2 \ + /etc/rhn \ + /etc/systemd/system/multi-user.target.wants \ + /etc/systemd/system/sockets.target.wants \ + /etc/salt \ + /etc/tomcat \ + /etc/cobbler \ + /etc/sysconfig \ + /etc/postfix \ + /etc/sssd \ + /etc/pki/tls +do + chown --reference=$vol /mnt$vol; + chmod --reference=$vol /mnt$vol; + if [ -z "$(ls -A /mnt$vol)" ]; then + cp -a $vol/. /mnt$vol; + if [ "$vol" = "/srv/www" ]; then + ln -s /etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT /mnt$vol/RHN-ORG-TRUSTED-SSL-CERT; + fi + + if [ "$vol" = "/etc/pki/tls" ]; then + ln -s /etc/pki/spacewalk-tls/spacewalk.crt /mnt/etc/pki/tls/certs/spacewalk.crt; + ln -s /etc/pki/spacewalk-tls/spacewalk.key /mnt/etc/pki/tls/private/spacewalk.key; + cp /etc/pki/spacewalk-tls/spacewalk.key /mnt/etc/pki/tls/private/pg-spacewalk.key; + chown postgres:postgres /mnt/etc/pki/tls/private/pg-spacewalk.key; + fi + fi +done +` + +// GetServerMounts returns the volume mounts required for the server pod. +func GetServerMounts() []types.VolumeMount { + // Filter out the duplicate mounts to avoid issues applying the jobs + serverMounts := utils.ServerVolumeMounts + mounts := []types.VolumeMount{} + mountsSet := map[string]types.VolumeMount{} + for _, mount := range serverMounts { + switch mount.Name { + // Skip mounts that are not PVCs + case "ca-cert", "tls-key": + continue + } + if _, exists := mountsSet[mount.Name]; !exists { + mounts = append(mounts, mount) + mountsSet[mount.Name] = mount + } + } + + return mounts +} + +// TuneMounts adjusts the server mounts with the size and storage class passed by as parameters. 
+func TuneMounts(mounts []types.VolumeMount, flags *cmd_utils.VolumesFlags) []types.VolumeMount { + tunedMounts := []types.VolumeMount{} + for _, mount := range mounts { + class := flags.Class + var volumeFlags *cmd_utils.VolumeFlags + switch mount.Name { + case "var-pgsql": + volumeFlags = &flags.Database + case "var-spacewalk": + volumeFlags = &flags.Packages + case "var-cache": + volumeFlags = &flags.Cache + case "srv-www": + volumeFlags = &flags.Www + } + if volumeFlags != nil { + if volumeFlags.Class != "" { + class = volumeFlags.Class + } + mount.Size = volumeFlags.Size + } + mount.Class = class + tunedMounts = append(tunedMounts, mount) + } + return tunedMounts +} + var runCmdOutput = utils.RunCmdOutput // getRunningServerImage extracts the main server container image from a running deployment. diff --git a/mgradm/shared/kubernetes/flags.go b/mgradm/shared/kubernetes/flags.go index 427692ee2..c73a76483 100644 --- a/mgradm/shared/kubernetes/flags.go +++ b/mgradm/shared/kubernetes/flags.go @@ -12,6 +12,7 @@ import "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" type KubernetesServerFlags struct { utils.ServerFlags `mapstructure:",squash"` Helm utils.HelmFlags + Volumes utils.VolumesFlags // SSH defines the SSH configuration to use to connect to the source server to migrate. SSH utils.SSHFlags } diff --git a/mgradm/shared/kubernetes/hubApi.go b/mgradm/shared/kubernetes/hubApi.go new file mode 100644 index 000000000..93edfb814 --- /dev/null +++ b/mgradm/shared/kubernetes/hubApi.go @@ -0,0 +1,101 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "fmt" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" + apps "k8s.io/api/apps/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +const ( + // HubAPIDeployName is the deployment name of the Hub API. + HubAPIDeployName = "uyuni-hub-api" + hubAPIServiceName = "hub-api" +) + +// InstallHubAPI installs the Hub API deployment and service. +func InstallHubAPI(namespace string, image string, pullPolicy string, pullSecret string) error { + if err := startHubAPIDeployment(namespace, image, pullPolicy, pullSecret); err != nil { + return err + } + + if err := createHubAPIService(namespace); err != nil { + return err + } + + // TODO Do we want an ingress to use port 80 / 443 from the outside too? + // This would have an impact on the user's scripts. 
+ return nil +} + +func startHubAPIDeployment(namespace string, image string, pullPolicy string, pullSecret string) error { + deploy := getHubAPIDeployment(namespace, image, pullPolicy, pullSecret) + return kubernetes.Apply([]runtime.Object{deploy}, L("failed to create the hub API deployment")) +} + +func getHubAPIDeployment(namespace string, image string, pullPolicy string, pullSecret string) *apps.Deployment { + var replicas int32 = 1 + + deploy := &apps.Deployment{ + TypeMeta: meta.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: HubAPIDeployName, + Namespace: namespace, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.HubAPIComponent), + }, + Spec: apps.DeploymentSpec{ + Replicas: &replicas, + Selector: &meta.LabelSelector{ + MatchLabels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.HubAPIComponent), + }, + Template: core.PodTemplateSpec{ + ObjectMeta: meta.ObjectMeta{ + Labels: kubernetes.GetLabels(kubernetes.ServerApp, kubernetes.HubAPIComponent), + }, + Spec: core.PodSpec{ + Containers: []core.Container{ + { + Name: "uyuni-hub-api", + Image: image, + ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy), + Ports: []core.ContainerPort{ + { + ContainerPort: int32(2830), + }, + }, + Env: []core.EnvVar{ + {Name: "HUB_API_URL", Value: fmt.Sprintf("http://%s/rpc/api", utils.WebServiceName)}, + {Name: "HUB_CONNECT_TIMEOUT", Value: "10"}, + {Name: "HUB_REQUEST_TIMEOUT", Value: "10"}, + {Name: "HUB_CONNECT_USING_SSL", Value: "false"}, + }, + }, + }, + }, + }, + }, + } + + if pullSecret != "" { + deploy.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}} + } + return deploy +} + +func createHubAPIService(namespace string) error { + svc := getService(namespace, kubernetes.ServerApp, kubernetes.HubAPIComponent, hubAPIServiceName, core.ProtocolTCP, + utils.NewPortMap(utils.HubAPIServiceName, "api", 2830, 2830), + ) + return kubernetes.Apply([]runtime.Object{svc}, L("failed to create the hub API service")) +} diff --git a/mgradm/shared/kubernetes/ingress.go b/mgradm/shared/kubernetes/ingress.go new file mode 100644 index 000000000..337e5c16c --- /dev/null +++ b/mgradm/shared/kubernetes/ingress.go @@ -0,0 +1,206 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/utils" + net "k8s.io/api/networking/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CertSecretName is the name of the server SSL certificate secret to use. +const CertSecretName = "uyuni-cert" + +const ( + IngressNameSSL = "uyuni-ingress-ssl" + IngressNameSSLRedirect = "uyuni-ingress-ssl-redirect" + IngressNameNoSSL = "uyuni-ingress-nossl" +) + +// CreateIngress creates the ingress definitions for Uyuni server. +// +// fqdn is the fully qualified domain name associated with the Uyuni server. +// +// caIssuer is the name of the cert-manager to associate for the SSL routes. +// It can be empty if cert-manager is not used. +// +// ingressName is one of traefik or nginx. 
+func CreateIngress(namespace string, fqdn string, caIssuer string, ingressName string) error { + ingresses := GetIngresses(namespace, fqdn, caIssuer, ingressName) + return kubernetes.Apply(ingresses, L("failed to create the ingresses")) +} + +// GetIngresses returns the ingress definitions to create based on the name of the ingress. +// If ingressName is neither nginx nor traefik, no ingress rules are returned. +func GetIngresses(namespace string, fqdn string, caIssuer string, ingressName string) []*net.Ingress { + ingresses := []*net.Ingress{} + if ingressName != "nginx" && ingressName != "traefik" { + return ingresses + } + + ingresses = append(ingresses, + getSSLIngress(namespace, fqdn, caIssuer, ingressName), + getNoSSLIngress(namespace, fqdn, ingressName), + ) + sslRedirectIngress := getSSLRedirectIngress(namespace, fqdn, ingressName) + if sslRedirectIngress != nil { + ingresses = append(ingresses, sslRedirectIngress) + } + return ingresses +} + +func getSSLIngress(namespace string, fqdn string, caIssuer string, ingressName string) *net.Ingress { + annotations := map[string]string{} + if caIssuer != "" { + annotations["cert-manager.io/issuer"] = caIssuer + } + if ingressName == "traefik" { + annotations["traefik.ingress.kubernetes.io/router.tls"] = "true" + annotations["traefik.ingress.kubernetes.io/router.tls.domains.n.main"] = fqdn + annotations["traefik.ingress.kubernetes.io/router.entrypoints"] = "websecure,web" + } + + ingress := net.Ingress{ + TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: IngressNameSSL, + Annotations: annotations, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + Spec: net.IngressSpec{ + TLS: []net.IngressTLS{ + {Hosts: []string{fqdn}, SecretName: CertSecretName}, + }, + Rules: []net.IngressRule{ + getIngressWebRule(fqdn), + }, + }, + } + + return &ingress +} + +func getSSLRedirectIngress(namespace string, fqdn string, ingressName string) *net.Ingress { + var ingress *net.Ingress + + // Nginx doesn't require a special ingress for the SSL redirection. 
+ if ingressName == "traefik" { + ingress = &net.Ingress{ + TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: IngressNameSSLRedirect, + Annotations: map[string]string{ + "traefik.ingress.kubernetes.io/router.middlewares": "default-uyuni-https-redirect@kubernetescrd", + "traefik.ingress.kubernetes.io/router.entrypoints": "web", + }, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + Spec: net.IngressSpec{ + Rules: []net.IngressRule{ + getIngressWebRule(fqdn), + }, + }, + } + } + + return ingress +} + +var noSSLPaths = []string{ + "/pub", + "/rhn/([^/])+/DownloadFile", + "/(rhn/)?rpc/api", + "/rhn/errors", + "/rhn/ty/TinyUrl", + "/rhn/websocket", + "/rhn/metrics", + "/cobbler_api", + "/cblr", + "/httpboot", + "/images", + "/cobbler", + "/os-images", + "/tftp", + "/docs", +} + +func getNoSSLIngress(namespace string, fqdn string, ingressName string) *net.Ingress { + annotations := map[string]string{} + if ingressName == "nginx" { + annotations["nginx.ingress.kubernetes.io/ssl-redirect"] = "false" + } + if ingressName == "traefik" { + annotations["traefik.ingress.kubernetes.io/router.tls"] = "false" + annotations["traefik.ingress.kubernetes.io/router.entrypoints"] = "web" + } + + pathType := net.PathTypePrefix + paths := []net.HTTPIngressPath{} + for _, noSSLPath := range noSSLPaths { + paths = append(paths, net.HTTPIngressPath{ + Backend: webServiceBackend, + Path: noSSLPath, + PathType: &pathType, + }) + } + + ingress := net.Ingress{ + TypeMeta: meta.TypeMeta{APIVersion: "networking.k8s.io/v1", Kind: "Ingress"}, + ObjectMeta: meta.ObjectMeta{ + Namespace: namespace, + Name: IngressNameNoSSL, + Annotations: annotations, + Labels: kubernetes.GetLabels(kubernetes.ServerApp, ""), + }, + Spec: net.IngressSpec{ + TLS: []net.IngressTLS{ + {Hosts: []string{fqdn}, SecretName: CertSecretName}, + }, + Rules: []net.IngressRule{ + { + Host: fqdn, + IngressRuleValue: net.IngressRuleValue{ + HTTP: &net.HTTPIngressRuleValue{Paths: paths}, + }, + }, + }, + }, + } + + return &ingress +} + +// build the ingress rule object catching all HTTP traffic. 
+func getIngressWebRule(fqdn string) net.IngressRule { + pathType := net.PathTypePrefix + + return net.IngressRule{ + Host: fqdn, + IngressRuleValue: net.IngressRuleValue{ + HTTP: &net.HTTPIngressRuleValue{ + Paths: []net.HTTPIngressPath{ + { + Backend: webServiceBackend, + Path: "/", + PathType: &pathType, + }, + }, + }, + }, + } +} + +var webServiceBackend net.IngressBackend = net.IngressBackend{ + Service: &net.IngressServiceBackend{ + Name: utils.WebServiceName, + Port: net.ServiceBackendPort{Number: 80}, + }, +} diff --git a/mgradm/shared/kubernetes/install.go b/mgradm/shared/kubernetes/install.go index e08820d7e..144214335 100644 --- a/mgradm/shared/kubernetes/install.go +++ b/mgradm/shared/kubernetes/install.go @@ -84,7 +84,7 @@ func Deploy( } // Wait for the pod to be started - err = kubernetes.WaitForDeployment(helmFlags.Uyuni.Namespace, HelmAppName, "uyuni") + err = kubernetes.WaitForDeployments(helmFlags.Uyuni.Namespace, HelmAppName) if err != nil { return utils.Errorf(err, L("cannot deploy")) } @@ -118,8 +118,9 @@ func UyuniUpgrade( // The values computed from the command line need to be last to override what could be in the extras helmParams = append(helmParams, "--set", "images.server="+serverImage, - "--set", "pullPolicy="+kubernetes.GetPullPolicy(pullPolicy), - "--set", "fqdn="+fqdn) + "--set", "pullPolicy="+string(kubernetes.GetPullPolicy(pullPolicy)), + "--set", "fqdn="+fqdn, + ) if hubXmlrpcReplicas > 0 { log.Info().Msg(L("Enabling Hub XMLRPC API container.")) @@ -208,12 +209,6 @@ func Upgrade( return utils.Errorf(err, L("cannot set replica to 0")) } - defer func() { - // if something is running, we don't need to set replicas to 1 - if _, err = kubernetes.GetNode(namespace, kubernetes.ServerFilter); err != nil { - err = kubernetes.ReplicasTo(namespace, kubernetes.ServerApp, 1) - } - }() if inspectedValues.ImagePgVersion > inspectedValues.CurrentPgVersion { log.Info().Msgf(L("Previous PostgreSQL is %[1]s, new one is %[2]s. Performing a DB version upgrade…"), inspectedValues.CurrentPgVersion, inspectedValues.ImagePgVersion) @@ -270,5 +265,5 @@ func Upgrade( return utils.Errorf(err, L("cannot upgrade to image %s"), serverImage) } - return kubernetes.WaitForDeployment(namespace, "uyuni", "uyuni") + return kubernetes.WaitForDeployments(namespace, "uyuni") } diff --git a/mgradm/shared/kubernetes/k3s.go b/mgradm/shared/kubernetes/k3s.go index ca756fa9f..ebdd507b8 100644 --- a/mgradm/shared/kubernetes/k3s.go +++ b/mgradm/shared/kubernetes/k3s.go @@ -64,7 +64,7 @@ func RunPgsqlVersionUpgrade( } log.Info().Msgf(L("Using database upgrade image %s"), upgradeImageURL) - pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql, true) + pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql) if err != nil { return utils.Errorf(err, L("cannot generate PostgreSQL database version upgrade script")) } diff --git a/mgradm/shared/kubernetes/namespace.go b/mgradm/shared/kubernetes/namespace.go new file mode 100644 index 000000000..108554be0 --- /dev/null +++ b/mgradm/shared/kubernetes/namespace.go @@ -0,0 +1,26 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// CreateNamespace creates a kubernetes namespace. +func CreateNamespace(namespace string) error { + ns := core.Namespace{ + TypeMeta: meta.TypeMeta{Kind: "Namespace", APIVersion: "v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: namespace, + }, + } + return kubernetes.Apply([]runtime.Object{&ns}, L("failed to create the namespace")) +} diff --git a/mgradm/shared/kubernetes/postUpgradeJob.go b/mgradm/shared/kubernetes/postUpgradeJob.go new file mode 100644 index 000000000..7d685514f --- /dev/null +++ b/mgradm/shared/kubernetes/postUpgradeJob.go @@ -0,0 +1,39 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "github.com/rs/zerolog/log" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + + "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + batch "k8s.io/api/batch/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// PostUpgradeJobName is the name of the job apply the database changes after the upgrade. +const PostUpgradeJobName = "uyuni-post-upgrade" + +// StartPostUpgradeJob starts the job applying the database changes after the upgrade. +func StartPostUpgradeJob(namespace string, image string, pullPolicy string, pullSecret string) (string, error) { + log.Info().Msg(L("Performing post upgrade changes…")) + + job, err := getPostUpgradeJob(namespace, image, pullPolicy, pullSecret) + if err != nil { + return "", err + } + + return job.ObjectMeta.Name, kubernetes.Apply([]runtime.Object{job}, L("failed to run the post upgrade job")) +} + +func getPostUpgradeJob(namespace string, image string, pullPolicy string, pullSecret string) (*batch.Job, error) { + scriptData := templates.PostUpgradeTemplateData{} + mounts := GetServerMounts() + + return kubernetes.GetScriptJob(namespace, PostUpgradeJobName, image, pullPolicy, pullSecret, mounts, scriptData) +} diff --git a/mgradm/shared/kubernetes/services.go b/mgradm/shared/kubernetes/services.go new file mode 100644 index 000000000..f5fb3b4a4 --- /dev/null +++ b/mgradm/shared/kubernetes/services.go @@ -0,0 +1,112 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "fmt" + "strings" + + "github.com/rs/zerolog" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + . "github.com/uyuni-project/uyuni-tools/shared/l10n" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// CreateServices creates the kubernetes services for the server. +// +// If debug is true, the Java debug ports will be exposed. +func CreateServices(namespace string, debug bool) error { + services := GetServices(namespace, debug) + for _, svc := range services { + if !hasCustomService(namespace, svc.ObjectMeta.Name) { + if err := kubernetes.Apply([]*core.Service{svc}, L("failed to create the service")); err != nil { + return err + } + } + } + return nil +} + +// GetServices creates the definitions of all the services of the server. +// +// If debug is true, the Java debug ports will be exposed. 
+func GetServices(namespace string, debug bool) []*core.Service {
+	ports := utils.GetServerPorts(debug)
+	ports = append(ports, utils.DBPorts...)
+
+	servicesPorts := map[string][]types.PortMap{}
+	for _, port := range ports {
+		svcPorts := servicesPorts[port.Service]
+		if svcPorts == nil {
+			svcPorts = []types.PortMap{}
+		}
+		svcPorts = append(svcPorts, port)
+		servicesPorts[port.Service] = svcPorts
+	}
+
+	services := []*core.Service{}
+	for _, svcPorts := range servicesPorts {
+		protocol := core.ProtocolTCP
+		if svcPorts[0].Protocol == "udp" {
+			protocol = core.ProtocolUDP
+		}
+		services = append(services,
+			getService(namespace, kubernetes.ServerApp, kubernetes.ServerComponent, svcPorts[0].Service, protocol, svcPorts...),
+		)
+	}
+	return services
+}
+
+func getService(
+	namespace string,
+	app string,
+	component string,
+	name string,
+	protocol core.Protocol,
+	ports ...types.PortMap,
+) *core.Service {
+	// TODO make configurable to allow NodePort and maybe LoadBalancer for exposed services.
+	serviceType := core.ServiceTypeClusterIP
+
+	portObjs := []core.ServicePort{}
+	for _, port := range ports {
+		portObjs = append(portObjs, core.ServicePort{
+			Name:       port.Name,
+			Port:       int32(port.Exposed),
+			TargetPort: intstr.FromInt(port.Port),
+			Protocol:   protocol,
+		})
+	}
+
+	return &core.Service{
+		TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Service"},
+		ObjectMeta: meta.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+			Labels:    kubernetes.GetLabels(app, component),
+		},
+		Spec: core.ServiceSpec{
+			Ports:    portObjs,
+			Selector: map[string]string{kubernetes.ComponentLabel: component},
+			Type:     serviceType,
+		},
+	}
+}
+
+func hasCustomService(namespace string, name string) bool {
+	// Custom services don't have our app label!
+	out, err := utils.RunCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "svc", "-n", namespace,
+		"-l", fmt.Sprintf("%s!=%s", kubernetes.AppLabel, kubernetes.ServerApp),
+		"-o", fmt.Sprintf("jsonpath={.items[?(@.metadata.name=='%s')]}", name),
+	)
+	return err == nil && strings.TrimSpace(string(out)) != ""
+}
diff --git a/mgradm/shared/kubernetes/traefik.go b/mgradm/shared/kubernetes/traefik.go
new file mode 100644
index 000000000..914f260ef
--- /dev/null
+++ b/mgradm/shared/kubernetes/traefik.go
@@ -0,0 +1,127 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"fmt"
+	"html/template"
+	"io"
+	"os"
+	"path"
+
+	"github.com/rs/zerolog"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+)
+
+// CreateTraefikRoutes creates the routes and middleware wiring the traefik endpoints to their service.
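+//
+// Usage sketch, assuming a server namespace named "uyuni" and a Hub XML-RPC
+// deployment, with the Java debug ports left closed:
+//
+//	if err := CreateTraefikRoutes("uyuni", true, false); err != nil {
+//		return err
+//	}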
+func CreateTraefikRoutes(namespace string, hub bool, debug bool) error { + routeTemplate := template.Must(template.New("ingressRoute").Parse(ingressRouteTemplate)) + + tempDir, cleaner, err := utils.TempDir() + if err != nil { + return err + } + defer cleaner() + + filePath := path.Join(tempDir, "routes.yaml") + file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0700) + if err != nil { + return utils.Errorf(err, L("failed to open %s for writing"), filePath) + } + defer file.Close() + + // Write the SSL Redirect middleware + _, err = file.WriteString(fmt.Sprintf(` +apiVersion: traefik.containo.us/v1alpha1 +kind: Middleware +metadata: + name: uyuni-https-redirect + namespace: "%s" + labels: + %s: %s +spec: + redirectScheme: + scheme: https + permanent: true +`, namespace, kubernetes.AppLabel, kubernetes.ServerApp)) + if err != nil { + return utils.Errorf(err, L("failed to write traefik middleware and routes to file")) + } + + // Write the routes from the endpoint to the services + for _, endpoint := range GetPortLists(hub, debug) { + _, err := file.WriteString("---\n") + if err != nil { + return utils.Errorf(err, L("failed to write traefik middleware and routes to file")) + } + if err := getTraefixRoute(routeTemplate, file, namespace, endpoint); err != nil { + return err + } + } + if err := file.Close(); err != nil { + return utils.Errorf(err, L("failed to close traefik middleware and routes file")) + } + + if _, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "apply", "-f", filePath); err != nil { + return utils.Errorf(err, L("failed to create traefik middleware and routes")) + } + return nil +} + +func getTraefixRoute(t *template.Template, writer io.Writer, namespace string, endpoint types.PortMap) error { + endpointName := kubernetes.GetTraefikEndpointName(endpoint) + protocol := "TCP" + if endpoint.Protocol == "udp" { + protocol = "UDP" + } + + data := routeData{ + Name: endpointName + "-route", + Namespace: namespace, + EndPoint: endpointName, + Service: endpoint.Service, + Port: endpoint.Exposed, + Protocol: protocol, + } + if err := t.Execute(writer, data); err != nil { + return utils.Errorf(err, L("failed to write traefik routes to file")) + } + return nil +} + +type routeData struct { + Name string + Namespace string + EndPoint string + Service string + Port int + Protocol string +} + +const ingressRouteTemplate = ` +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRoute{{ .Protocol }} +metadata: + name: {{ .Name }} + namespace: "{{ .Namespace }}" + labels: + ` + kubernetes.AppLabel + ": " + kubernetes.ServerApp + ` +spec: + entryPoints: + - {{ .EndPoint }} + routes: + - services: + - name: {{ .Service }} + port: {{ .Port }} +{{- if eq .Protocol "TCP" }} + match: ` + "HostSNI(`*`)" + ` +{{- end }} +` diff --git a/mgradm/shared/kubernetes/traefik_test.go b/mgradm/shared/kubernetes/traefik_test.go new file mode 100644 index 000000000..ba9eee3b8 --- /dev/null +++ b/mgradm/shared/kubernetes/traefik_test.go @@ -0,0 +1,82 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "bytes" + "html/template" + "testing" + + "github.com/uyuni-project/uyuni-tools/shared/testutils" + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" +) + +func TestGetTraefikRouteTCP(t *testing.T) { + routeTemplate := template.Must(template.New("ingressRoute").Parse(ingressRouteTemplate)) + + var buf bytes.Buffer + err := 
getTraefixRoute(routeTemplate, &buf, "foo", utils.NewPortMap("svcname", "port1", 123, 456)) + if err != nil { + t.Errorf("Unexpected error: %s", err) + } + + actual := buf.String() + expected := ` +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteTCP +metadata: + name: svcname-port1-route + namespace: "foo" + labels: + app.kubernetes.io/part-of: uyuni +spec: + entryPoints: + - svcname-port1 + routes: + - services: + - name: svcname + port: 123 + match: ` + "HostSNI(`*`)\n" + testutils.AssertEquals(t, "Wrong traefik route generated", expected, actual) +} + +func TestGetTraefikRouteUDP(t *testing.T) { + routeTemplate := template.Must(template.New("ingressRoute").Parse(ingressRouteTemplate)) + + var buf bytes.Buffer + err := getTraefixRoute(routeTemplate, &buf, "foo", + types.PortMap{ + Service: "svcname", + Name: "port1", + Exposed: 123, + Port: 456, + Protocol: "udp", + }) + if err != nil { + t.Errorf("Unexpected error: %s", err) + } + + actual := buf.String() + expected := ` +apiVersion: traefik.containo.us/v1alpha1 +kind: IngressRouteUDP +metadata: + name: svcname-port1-route + namespace: "foo" + labels: + app.kubernetes.io/part-of: uyuni +spec: + entryPoints: + - svcname-port1 + routes: + - services: + - name: svcname + port: 123 +` + testutils.AssertEquals(t, "Wrong traefik route generated", expected, actual) +} diff --git a/mgradm/shared/podman/podman.go b/mgradm/shared/podman/podman.go index 213aac5ca..ebd2f69df 100644 --- a/mgradm/shared/podman/podman.go +++ b/mgradm/shared/podman/podman.go @@ -30,7 +30,7 @@ import ( // GetExposedPorts returns the port exposed. func GetExposedPorts(debug bool) []types.PortMap { ports := utils.GetServerPorts(debug) - ports = append(ports, utils.NewPortMap(utils.ServerTCPServiceName, "https", 443, 443)) + ports = append(ports, utils.NewPortMap(utils.WebServiceName, "https", 443, 443)) ports = append(ports, utils.TCPPodmanPorts...) return ports } @@ -278,9 +278,7 @@ func RunPgsqlVersionUpgrade( log.Info().Msgf(L("Using database upgrade image %s"), preparedImage) - pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript( - scriptDir, oldPgsql, newPgsql, false, - ) + pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql) if err != nil { return utils.Errorf(err, L("cannot generate PostgreSQL database version upgrade script")) } diff --git a/mgradm/shared/templates/issuerTemplate.go b/mgradm/shared/templates/issuerTemplate.go index 614d20460..cddb86958 100644 --- a/mgradm/shared/templates/issuerTemplate.go +++ b/mgradm/shared/templates/issuerTemplate.go @@ -7,6 +7,8 @@ package templates import ( "io" "text/template" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" ) // Deploy self-signed issuer or CA Certificate and key. 
@@ -15,6 +17,8 @@ kind: Issuer
 metadata:
   name: uyuni-issuer
   namespace: {{ .Namespace }}
+  labels:
+    app: ` + kubernetes.ServerApp + `
 spec:
   selfSigned: {}
 ---
@@ -23,6 +27,8 @@ kind: Certificate
 metadata:
   name: uyuni-ca
   namespace: {{ .Namespace }}
+  labels:
+    app: ` + kubernetes.ServerApp + `
 spec:
   isCA: true
 {{- if or .Country .State .City .Org .OrgUnit }}
diff --git a/mgradm/shared/templates/migrateScriptTemplate.go b/mgradm/shared/templates/migrateScriptTemplate.go
index a7d4f924a..fbdf3c86b 100644
--- a/mgradm/shared/templates/migrateScriptTemplate.go
+++ b/mgradm/shared/templates/migrateScriptTemplate.go
@@ -137,6 +137,7 @@ grep '^db_name' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
 grep '^db_port' /etc/rhn/rhn.conf | sed 's/[ \t]//g' >>/var/lib/uyuni-tools/data
 
 $SSH {{ .SourceFqdn }} sh -c "systemctl list-unit-files | grep hub-xmlrpc-api | grep -q active && echo has_hubxmlrpc=true || echo has_hubxmlrpc=false" >>/var/lib/uyuni-tools/data
+(test $($SSH {{ .SourceFqdn }} grep jdwp -r /etc/tomcat/conf.d/ /etc/rhn/taskomatic.conf | wc -l) -gt 0 && echo debug=true || echo debug=false) >>/var/lib/uyuni-tools/data
 
 echo "Altering configuration for domain resolution..."
 sed 's/report_db_host = {{ .SourceFqdn }}/report_db_host = localhost/' -i /etc/rhn/rhn.conf;
@@ -153,14 +154,8 @@ sed 's/--add-modules java.annotation,com.sun.xml.bind://' -i /etc/tomcat/conf.d/
 sed 's/-XX:-UseConcMarkSweepGC//' -i /etc/tomcat/conf.d/*
 test -f /etc/tomcat/conf.d/remote_debug.conf && sed 's/address=[^:]*:/address=*:/' -i /etc/tomcat/conf.d/remote_debug.conf
 
-# Create a backup copy of the data to prepare DB upgrade.
-# We need to upgrade the deployment before upgrading the database to get the SSL certificates ready.
-# To avoid corrupting the database files, move them to where the upgrade script will expect them.
-echo "Posgresql versions: image: $image_pg_version, current: $current_pg_version"
-if test "$image_pg_version" != "$current_pg_version"; then
-  echo "Backing up the database files ..."
-  mv /var/lib/pgsql/data /var/lib/pgsql/data-pg$current_pg_version
-fi
+# Alter rhn.conf to ensure mirror is set to /mirror if set at all
+sed 's/server.susemanager.fromdir =.*/server.susemanager.fromdir = \/mirror/' -i /etc/rhn/rhn.conf
 
 {{ if .Kubernetes }}
 echo 'server.no_ssl = 1' >> /etc/rhn/rhn.conf;
@@ -191,7 +186,6 @@ if test "extractedSSL" != "1"; then
   # For third party certificates, the CA chain is in the certificate file.
   rsync -e "$SSH" --rsync-path='sudo rsync' -avz {{ .SourceFqdn }}:/etc/pki/tls/private/spacewalk.key /var/lib/uyuni-tools/
   rsync -e "$SSH" --rsync-path='sudo rsync' -avz {{ .SourceFqdn }}:/etc/pki/tls/certs/spacewalk.crt /var/lib/uyuni-tools/
-
 fi
 
 echo "Removing useless ssl-build folder..."
diff --git a/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go b/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go
index 39c63e740..d4c22592d 100644
--- a/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go
+++ b/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go
@@ -13,6 +13,10 @@ import (
 const postgresFinalizeScriptTemplate = `#!/bin/bash
 set -e
 
+echo "Temporarily disable SSL in the postgresql configuration"
+cp /var/lib/pgsql/data/postgresql.conf /var/lib/pgsql/data/postgresql.conf.bak
+sed 's/^ssl/#ssl/' -i /var/lib/pgsql/data/postgresql.conf
+
 {{ if .Migration }}
 echo "Adding database access for other containers..."
db_user=$(sed -n '/^db_user/{s/^.*=[ \t]\+\(.*\)$/\1/ ; p}' /etc/rhn/rhn.conf)
@@ -57,9 +61,12 @@ where not exists (select 1 from rhntaskorun r join rhntaskotemplate t on r.templ
 join rhntaskobunch b on t.bunch_id = b.id where b.name='update-system-overview-bunch' limit 1);
 EOT
 
-
 echo "Stopping Postgresql..."
 su -s /bin/bash - postgres -c "/usr/share/postgresql/postgresql-script stop"
+
+echo "Enable SSL again"
+cp /var/lib/pgsql/data/postgresql.conf.bak /var/lib/pgsql/data/postgresql.conf
+
 echo "DONE"
 `
diff --git a/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go b/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go
index a4e188bd6..ed9d7a90f 100644
--- a/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go
+++ b/mgradm/shared/templates/pgsqlVersionUpgradeScriptTemplate.go
@@ -23,20 +23,23 @@ test -d /usr/lib/postgresql$NEW_VERSION/bin
 echo "Testing presence of postgresql$OLD_VERSION..."
 test -d /usr/lib/postgresql$OLD_VERSION/bin
 
-# Data have already been backed up at the end of the migration script
-# Reset the potentially created new pgsql data
-rm -rf /var/lib/pgsql/data
+# Create a backup copy of the data to prepare DB upgrade.
+echo "Backing up the database files ..."
+mv /var/lib/pgsql/data /var/lib/pgsql/data-pg$OLD_VERSION
+
 echo "Create new database directory..."
 mkdir -p /var/lib/pgsql/data
 chown -R postgres:postgres /var/lib/pgsql
-echo "Enforce key permission"
-chown -R postgres:postgres /etc/pki/tls/private/pg-spacewalk.key
-chown -R postgres:postgres /etc/pki/tls/certs/spacewalk.crt
+
+if [ -e /etc/pki/tls/private/pg-spacewalk.key ]; then
+  echo "Enforce key permission"
+  chown -R postgres:postgres /etc/pki/tls/private/pg-spacewalk.key
+  chown -R postgres:postgres /etc/pki/tls/certs/spacewalk.crt
+fi
 
 echo "Initialize new postgresql $NEW_VERSION database..."
 . /etc/sysconfig/postgresql 2>/dev/null # Load locale for SUSE
 PGHOME=$(getent passwd postgres | cut -d ":" -f6)
-#. $PGHOME/.i18n 2>/dev/null # Load locale for Enterprise Linux
 if [ -z $POSTGRES_LANG ]; then
     POSTGRES_LANG="en_US.UTF-8"
     [ ! -z $LC_CTYPE ] && POSTGRES_LANG=$LC_CTYPE
@@ -47,9 +50,15 @@ echo "Any suggested command from the console should be run using postgres user"
 su -s /bin/bash - postgres -c "initdb -D /var/lib/pgsql/data --locale=$POSTGRES_LANG"
 echo "Successfully initialized new postgresql $NEW_VERSION database."
 
+echo "Temporarily disable SSL in the old postgresql configuration"
+cp /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf.bak
+sed 's/^ssl/#ssl/' -i /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf
+
 su -s /bin/bash - postgres -c "pg_upgrade --old-bindir=/usr/lib/postgresql$OLD_VERSION/bin --new-bindir=/usr/lib/postgresql$NEW_VERSION/bin --old-datadir=/var/lib/pgsql/data-pg$OLD_VERSION --new-datadir=/var/lib/pgsql/data $FAST_UPGRADE"
 
+echo "Enable SSL again"
+cp /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf.bak /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf
+
 cp /var/lib/pgsql/data-pg$OLD_VERSION/pg_hba.conf /var/lib/pgsql/data
 mv /var/lib/pgsql/data-pg$OLD_VERSION/pg_hba.conf /var/lib/pgsql/data-pg$OLD_VERSION/pg_hba.conf.migrated
 cp /var/lib/pgsql/data-pg$OLD_VERSION/postgresql.conf /var/lib/pgsql/data/
@@ -61,7 +70,6 @@ echo "DONE"`
 type PostgreSQLVersionUpgradeTemplateData struct {
 	OldVersion string
 	NewVersion string
-	Kubernetes bool
 }
 
 // Render will create PostgreSQL upgrade script.
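With the Kubernetes flag removed from PostgreSQLVersionUpgradeTemplateData, the same script now serves both the podman and kubernetes paths. A minimal sketch of rendering it directly, with illustrative version values:

	data := templates.PostgreSQLVersionUpgradeTemplateData{
		OldVersion: "14",
		NewVersion: "16",
	}
	if err := data.Render(os.Stdout); err != nil {
		log.Fatal().Err(err).Msg("cannot render the PostgreSQL upgrade script")
	}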
diff --git a/mgradm/shared/templates/reusedCaIssuerTemplate.go b/mgradm/shared/templates/reusedCaIssuerTemplate.go index 4bf514b5a..b4f3edfa2 100644 --- a/mgradm/shared/templates/reusedCaIssuerTemplate.go +++ b/mgradm/shared/templates/reusedCaIssuerTemplate.go @@ -7,13 +7,17 @@ package templates import ( "io" "text/template" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" ) const uyuniCaIssuer = `apiVersion: cert-manager.io/v1 kind: Issuer metadata: - name: uyuni-ca-issuer + name: ` + kubernetes.CaIssuerName + ` namespace: {{ .Namespace }} + labels: + app: ` + kubernetes.ServerApp + ` spec: ca: secretName: uyuni-ca @@ -25,6 +29,8 @@ type: kubernetes.io/tls metadata: name: uyuni-ca namespace: {{ .Namespace }} + labels: + app: ` + kubernetes.ServerApp + ` data: ca.crt: {{ .Certificate }} tls.crt: {{ .Certificate }} diff --git a/mgradm/shared/templates/tlsSecret.go b/mgradm/shared/templates/tlsSecret.go index bd4ecd6d0..934ae5a62 100644 --- a/mgradm/shared/templates/tlsSecret.go +++ b/mgradm/shared/templates/tlsSecret.go @@ -7,6 +7,8 @@ package templates import ( "io" "text/template" + + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" ) // Deploy self-signed issuer or CA Certificate and key. @@ -16,6 +18,8 @@ type: kubernetes.io/tls metadata: name: {{ .Name }} namespace: {{ .Namespace }} + labels: + app: ` + kubernetes.ServerApp + ` data: ca.crt: {{ .RootCa }} tls.crt: {{ .Certificate }} diff --git a/mgradm/shared/utils/cmd_utils.go b/mgradm/shared/utils/cmd_utils.go index c8d617120..4d4016f36 100644 --- a/mgradm/shared/utils/cmd_utils.go +++ b/mgradm/shared/utils/cmd_utils.go @@ -56,6 +56,39 @@ func AddHelmInstallFlag(cmd *cobra.Command) { _ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-values", "helm") } +const volumesFlagsGroupID = "volumes" + +// AddVolumesFlags adds the Kubernetes volumes configuration parameters to the command. +func AddVolumesFlags(cmd *cobra.Command) { + cmd.Flags().String("volumes-class", "", L("Default storage class for all the volumes")) + cmd.Flags().String("volumes-mirror", "", + L("PersistentVolume name to use as a mirror. Empty means no mirror is used"), + ) + + _ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: volumesFlagsGroupID, Title: L("Volumes Configuration Flags")}) + _ = utils.AddFlagToHelpGroupID(cmd, "volumes-class", volumesFlagsGroupID) + _ = utils.AddFlagToHelpGroupID(cmd, "volumes-mirror", volumesFlagsGroupID) + + addVolumeFlags(cmd, "database", "var-pgsql", "50Gi") + addVolumeFlags(cmd, "packages", "var-spacewalk", "100Gi") + addVolumeFlags(cmd, "www", "srv-www", "100Gi") + addVolumeFlags(cmd, "cache", "var-cache", "10Gi") +} + +func addVolumeFlags(cmd *cobra.Command, name string, volumeName string, size string) { + sizeName := fmt.Sprintf("volumes-%s-size", name) + cmd.Flags().String( + sizeName, size, fmt.Sprintf(L("Requested size for the %s volume"), volumeName), + ) + _ = utils.AddFlagToHelpGroupID(cmd, sizeName, volumesFlagsGroupID) + + className := fmt.Sprintf("volumes-%s-class", name) + cmd.Flags().String( + className, "", fmt.Sprintf(L("Requested storage class for the %s volume"), volumeName), + ) + _ = utils.AddFlagToHelpGroupID(cmd, className, volumesFlagsGroupID) +} + // AddContainerImageFlags add container image flags to command. 
func AddContainerImageFlags(
	cmd *cobra.Command,
diff --git a/mgradm/shared/utils/exec.go b/mgradm/shared/utils/exec.go
index 55fb5b96c..8ea2130b5 100644
--- a/mgradm/shared/utils/exec.go
+++ b/mgradm/shared/utils/exec.go
@@ -55,12 +55,10 @@ func GeneratePgsqlVersionUpgradeScript(
 	scriptDir string,
 	oldPgVersion string,
 	newPgVersion string,
-	kubernetes bool,
 ) (string, error) {
 	data := templates.PostgreSQLVersionUpgradeTemplateData{
 		OldVersion: oldPgVersion,
 		NewVersion: newPgVersion,
-		Kubernetes: kubernetes,
 	}
 
 	scriptName := "pgsqlVersionUpgrade.sh"
diff --git a/mgradm/shared/utils/types.go b/mgradm/shared/utils/types.go
index b8c71bbea..e8fdbc5f8 100644
--- a/mgradm/shared/utils/types.go
+++ b/mgradm/shared/utils/types.go
@@ -34,3 +34,28 @@ type CocoFlags struct {
 	Image     types.ImageFlags `mapstructure:",squash"`
 	IsChanged bool
 }
+
+// VolumesFlags stores the persistent volume claims configuration.
+type VolumesFlags struct {
+	// Class is the default storage class for all the persistent volume claims.
+	Class string
+	// Database is the configuration of the var-pgsql volume.
+	Database VolumeFlags
+	// Packages is the configuration of the var-spacewalk volume containing the synchronized repositories.
+	Packages VolumeFlags
+	// Www is the configuration of the srv-www volume containing the images and distributions.
+	Www VolumeFlags
+	// Cache is the configuration of the var-cache volume.
+	Cache VolumeFlags
+	// Mirror is the PersistentVolume name to use in case of a mirror setup.
+	// An empty value means no mirror will be used.
+	Mirror string
+}
+
+// VolumeFlags is the configuration of one volume.
+type VolumeFlags struct {
+	// Size is the requested size of the volume using kubernetes values like '100Gi'.
+	Size string
+	// Class is the storage class of the volume.
+	Class string
+}
diff --git a/mgrpxy/cmd/install/kubernetes/utils.go b/mgrpxy/cmd/install/kubernetes/utils.go
index fe8ce6f0c..c65332986 100644
--- a/mgrpxy/cmd/install/kubernetes/utils.go
+++ b/mgrpxy/cmd/install/kubernetes/utils.go
@@ -60,7 +60,9 @@ func installForKubernetes(_ *types.GlobalFlags,
 	}
 
 	helmArgs := []string{"--set", "ingress=" + clusterInfos.Ingress}
-	helmArgs, err = shared_kubernetes.AddSCCSecret(helmArgs, flags.Helm.Proxy.Namespace, &flags.SCC)
+	helmArgs, err = shared_kubernetes.AddSCCSecret(
+		helmArgs, flags.Helm.Proxy.Namespace, &flags.SCC, shared_kubernetes.ProxyApp,
+	)
 	if err != nil {
 		return err
 	}
diff --git a/mgrpxy/shared/kubernetes/deploy.go b/mgrpxy/shared/kubernetes/deploy.go
index 4118932b6..2a037bcf4 100644
--- a/mgrpxy/shared/kubernetes/deploy.go
+++ b/mgrpxy/shared/kubernetes/deploy.go
@@ -86,7 +86,7 @@ func Deploy(imageFlags *utils.ProxyImageFlags, helmFlags *HelmFlags, configDir s
 		"--set", "images.proxy-tftpd="+imageFlags.GetContainerImage("tftpd"),
 		"--set", "repository="+imageFlags.Registry,
 		"--set", "version="+imageFlags.Tag,
-		"--set", "pullPolicy="+kubernetes.GetPullPolicy(imageFlags.PullPolicy))
+		"--set", "pullPolicy="+string(kubernetes.GetPullPolicy(imageFlags.PullPolicy)))
 
 	helmParams = append(helmParams, helmArgs...)
@@ -97,7 +97,7 @@ func Deploy(imageFlags *utils.ProxyImageFlags, helmFlags *HelmFlags, configDir s
 	}
 
 	// Wait for the pod to be started
-	return kubernetes.WaitForDeployment(helmFlags.Proxy.Namespace, helmAppName, "uyuni-proxy")
+	return kubernetes.WaitForDeployments(helmFlags.Proxy.Namespace, helmAppName)
 }
 
 func getSSHYaml(directory string) (string, error) {
diff --git a/shared/kubernetes/apply.go b/shared/kubernetes/apply.go
new file mode 100644
index 000000000..e36bb6666
--- /dev/null
+++ b/shared/kubernetes/apply.go
@@ -0,0 +1,62 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"os"
+	"path"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/cli-runtime/pkg/printers"
+)
+
+// Apply runs kubectl apply for the provided objects.
+//
+// The message should be a user-friendly localized message to provide in case of error.
+func Apply[T runtime.Object](objects []T, message string) error {
+	tempDir, cleaner, err := utils.TempDir()
+	if err != nil {
+		return err
+	}
+	defer cleaner()
+
+	// Write the definitions to a temporary file and apply it with kubectl
+	definitionPath := path.Join(tempDir, "definition.yaml")
+	if err := YamlFile(objects, definitionPath); err != nil {
+		return err
+	}
+
+	if err := utils.RunCmdStdMapping(zerolog.DebugLevel, "kubectl", "apply", "-f", definitionPath); err != nil {
+		return utils.Errorf(err, message)
+	}
+	return nil
+}
+
+// YamlFile generates a YAML file from a list of kubernetes objects.
+func YamlFile[T runtime.Object](objects []T, path string) error {
+	printer := printers.YAMLPrinter{}
+	file, err := os.Create(path)
+	if err != nil {
+		return utils.Errorf(err, L("failed to create %s YAML file"), path)
+	}
+	defer func() {
+		if err := file.Close(); err != nil {
+			log.Error().Err(err).Msgf(L("failed to close %s YAML file"), path)
+		}
+	}()
+
+	for _, obj := range objects {
+		err = printer.PrintObj(obj, file)
+		if err != nil {
+			return utils.Errorf(err, L("failed to write object to file"))
+		}
+	}
+
+	return nil
+}
diff --git a/shared/kubernetes/converters.go b/shared/kubernetes/converters.go
new file mode 100644
index 000000000..946931b77
--- /dev/null
+++ b/shared/kubernetes/converters.go
@@ -0,0 +1,43 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	core "k8s.io/api/core/v1"
+)
+
+// ConvertVolumeMounts converts the internal volume mounts into Kubernetes' ones.
+func ConvertVolumeMounts(mounts []types.VolumeMount) []core.VolumeMount {
+	res := []core.VolumeMount{}
+
+	for _, mount := range mounts {
+		converted := core.VolumeMount{
+			Name:      mount.Name,
+			MountPath: mount.MountPath,
+		}
+		res = append(res, converted)
+	}
+
+	return res
+}
+
+// ConvertPortMaps converts the internal port maps to Kubernetes ContainerPorts.
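+//
+// Sketch with a made-up port map; Exposed becomes the ContainerPort value:
+//
+//	ports := ConvertPortMaps([]types.PortMap{
+//		{Service: "web", Name: "http", Exposed: 80, Port: 80, Protocol: "tcp"},
+//	})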
+func ConvertPortMaps(ports []types.PortMap) []core.ContainerPort { + res := []core.ContainerPort{} + + for _, port := range ports { + protocol := core.ProtocolTCP + if port.Protocol == "udp" { + protocol = core.ProtocolUDP + } + converted := core.ContainerPort{ + ContainerPort: int32(port.Exposed), + Protocol: protocol, + } + res = append(res, converted) + } + return res +} diff --git a/shared/kubernetes/job.go b/shared/kubernetes/job.go new file mode 100644 index 000000000..05a3a73e9 --- /dev/null +++ b/shared/kubernetes/job.go @@ -0,0 +1,78 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +//go:build !nok8s + +package kubernetes + +import ( + "strings" + "time" + + "github.com/uyuni-project/uyuni-tools/shared/types" + "github.com/uyuni-project/uyuni-tools/shared/utils" + batch "k8s.io/api/batch/v1" + core "k8s.io/api/core/v1" + meta "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// GetScriptJob prepares the definition of a kubernetes job running a shell script from a template. +// The name is suffixed with a time stamp to avoid collisions. +func GetScriptJob( + namespace string, + name string, + image string, + pullPolicy string, + pullSecret string, + mounts []types.VolumeMount, + template utils.Template, +) (*batch.Job, error) { + var maxFailures int32 + + // Convert our mounts to Kubernetes objects + volumeMounts := ConvertVolumeMounts(mounts) + volumes := CreateVolumes(mounts) + + // Prepare the script + scriptBuilder := new(strings.Builder) + if err := template.Render(scriptBuilder); err != nil { + return nil, err + } + + timestamp := time.Now().Format("20060102150405") + + // Create the job object running the script wrapped as a sh command + job := batch.Job{ + TypeMeta: meta.TypeMeta{Kind: "Job", APIVersion: "batch/v1"}, + ObjectMeta: meta.ObjectMeta{ + Name: name + "-" + timestamp, + Namespace: namespace, + Labels: GetLabels(ServerApp, ""), + }, + Spec: batch.JobSpec{ + Template: core.PodTemplateSpec{ + Spec: core.PodSpec{ + Containers: []core.Container{ + { + Name: "runner", + Image: image, + ImagePullPolicy: GetPullPolicy(pullPolicy), + Command: []string{"sh", "-c", scriptBuilder.String()}, + VolumeMounts: volumeMounts, + }, + }, + Volumes: volumes, + RestartPolicy: core.RestartPolicyNever, + }, + }, + BackoffLimit: &maxFailures, + }, + } + + if pullSecret != "" { + job.Spec.Template.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}} + } + + return &job, nil +} diff --git a/shared/kubernetes/k3s.go b/shared/kubernetes/k3s.go index c9abc82b6..f2865fa6f 100644 --- a/shared/kubernetes/k3s.go +++ b/shared/kubernetes/k3s.go @@ -58,8 +58,6 @@ func shortenName(name string) string { "metrics": "mtrx", "postgresql": "pgsql", "exporter": "xport", - "uyuni-tcp": "uyuni", - "uyuni-udp": "uyuni", "uyuni-proxy-tcp": "uyuni", "uyuni-proxy-udp": "uyuni", } @@ -72,7 +70,7 @@ func shortenName(name string) string { func waitForTraefik() error { log.Info().Msg(L("Waiting for Traefik to be reloaded")) - for i := 0; i < 60; i++ { + for i := 0; i < 120; i++ { out, err := utils.RunCmdOutput(zerolog.TraceLevel, "kubectl", "get", "job", "-n", "kube-system", "-o", "jsonpath={.status.completionTime}", "helm-install-traefik") if err == nil { diff --git a/shared/kubernetes/kubernetes.go b/shared/kubernetes/kubernetes.go index 6a8a6e7d4..65dda8fcc 100644 --- a/shared/kubernetes/kubernetes.go +++ b/shared/kubernetes/kubernetes.go @@ -8,7 +8,6 @@ import ( "encoding/base64" "fmt" "os" - "path" "strings" "github.com/rs/zerolog" @@ -16,6 +15,9 @@ 
import (
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
 	"github.com/uyuni-project/uyuni-tools/shared/types"
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 )
 
 // ClusterInfos represent cluster information.
@@ -101,7 +103,7 @@ func Restart(namespace string, app string) error {
 // Start starts the pod.
 func Start(namespace string, app string) error {
 	// if something is running, we don't need to set replicas to 1
-	if _, err := GetNode(namespace, "-lapp="+app); err != nil {
+	if _, err := GetNode(namespace, "-l"+AppLabel+"="+app); err != nil {
 		return ReplicasTo(namespace, app, 1)
 	}
 	log.Debug().Msgf("Already running")
@@ -154,7 +156,14 @@ func GetSecret(secretName string, filter string) (string, error) {
 }
 
 // createDockerSecret creates a secret of docker type to authenticate registries.
-func createDockerSecret(namespace string, name string, registry string, username string, password string) error {
+func createDockerSecret(
+	namespace string,
+	name string,
+	registry string,
+	username string,
+	password string,
+	appLabel string,
+) error {
 	authString := fmt.Sprintf("%s:%s", username, password)
 	auth := base64.StdEncoding.EncodeToString([]byte(authString))
 	configjson := fmt.Sprintf(
@@ -162,47 +171,44 @@ func createDockerSecret(namespace string, name string, registry string, username
 		registry, username, password, auth,
 	)
 
-	secret := fmt.Sprintf(`
-apiVersion: v1
-kind: Secret
-type: kubernetes.io/dockerconfigjson
-metadata:
-  namespace: %s
-  name: %s
-data:
-  .dockerconfigjson: %s
-`, namespace, name, base64.StdEncoding.EncodeToString([]byte(configjson)))
-
-	tempDir, cleaner, err := utils.TempDir()
-	if err != nil {
-		return err
-	}
-	defer cleaner()
-
-	// Run the job
-	definitionPath := path.Join(tempDir, "definition.yaml")
-	if err := os.WriteFile(definitionPath, []byte(secret), 0600); err != nil {
-		return utils.Errorf(err, L("failed to write %s secret definition file"), name)
-	}
+	secret := core.Secret{
+		TypeMeta: meta.TypeMeta{APIVersion: "v1", Kind: "Secret"},
+		ObjectMeta: meta.ObjectMeta{
+			Namespace: namespace,
+			Name:      name,
+			Labels:    GetLabels(appLabel, ""),
+		},
+		// It seems serializing this object automatically transforms the secrets to base64.
+		Data: map[string][]byte{
+			".dockerconfigjson": []byte(configjson),
+		},
+		Type: core.SecretTypeDockerConfigJson,
+	}
+	return Apply([]runtime.Object{&secret}, fmt.Sprintf(L("failed to create the %s docker secret"), name))
+}
 
-	if err := utils.RunCmdStdMapping(zerolog.DebugLevel, "kubectl", "apply", "-f", definitionPath); err != nil {
-		return utils.Errorf(err, L("failed to define %s secret"), name)
+// AddSCCSecret creates a secret holding the SCC credentials and adds it to the helm args.
+func AddSCCSecret(helmArgs []string, namespace string, scc *types.SCCCredentials, appLabel string) ([]string, error) {
+	secret, err := GetSCCSecret(namespace, scc, appLabel)
+	if secret != "" {
+		helmArgs = append(helmArgs, "--set", "registrySecret="+secret)
 	}
-	return nil
+	return helmArgs, err
 }
 
-// AddSCCSecret creates a secret holding the SCC credentials and adds it to the helm args.
-func AddSCCSecret(helmArgs []string, namespace string, scc *types.SCCCredentials) ([]string, error) {
+// GetSCCSecret creates a secret holding the SCC credentials and returns the secret name.
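+//
+// Usage sketch, assuming an scc variable of type *types.SCCCredentials is in
+// scope; the namespace value is an example:
+//
+//	secret, err := GetSCCSecret("uyuni", scc, ServerApp)
+//	if err != nil {
+//		return err
+//	}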
+func GetSCCSecret(namespace string, scc *types.SCCCredentials, appLabel string) (string, error) {
+	const secretName = "scc-credentials"
+
 	if scc.User != "" && scc.Password != "" {
-		secretName := "scc-credentials"
 		if err := createDockerSecret(
-			namespace, secretName, "registry.suse.com", scc.User, scc.Password,
+			namespace, secretName, "registry.suse.com", scc.User, scc.Password, appLabel,
 		); err != nil {
-			return helmArgs, err
+			return "", err
 		}
-		helmArgs = append(helmArgs, "--set", "registrySecret="+secretName)
+		return secretName, nil
 	}
-	return helmArgs, nil
+	return "", nil
 }
 
 // GetDeploymentImagePullSecret returns the name of the image pull secret of a deployment.
diff --git a/shared/kubernetes/pod.go b/shared/kubernetes/pod.go
new file mode 100644
index 000000000..f047e7671
--- /dev/null
+++ b/shared/kubernetes/pod.go
@@ -0,0 +1,103 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"fmt"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// RunPodLogs runs a pod, waits for it to finish and returns its logs.
+//
+// This should be used only to run very fast tasks.
+func RunPodLogs(
+	namespace string,
+	name string,
+	image string,
+	pullPolicy string,
+	pullSecret string,
+	volumesMounts []types.VolumeMount,
+	cmd ...string,
+) ([]byte, error) {
+	// Read the file from the volume from a container into stdout
+	mounts := ConvertVolumeMounts(volumesMounts)
+	volumes := CreateVolumes(volumesMounts)
+
+	// Use a pod here since this is a very simple task reading out a file from a volume
+	pod := core.Pod{
+		TypeMeta: meta.TypeMeta{Kind: "Pod", APIVersion: "v1"},
+		ObjectMeta: meta.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+			Labels:    map[string]string{"app": name},
+		},
+		Spec: core.PodSpec{
+			Containers: []core.Container{
+				{
+					Name:            name,
+					Image:           image,
+					ImagePullPolicy: GetPullPolicy(pullPolicy),
+					Command:         cmd,
+					VolumeMounts:    mounts,
+				},
+			},
+			Volumes:       volumes,
+			RestartPolicy: core.RestartPolicyNever,
+		},
+	}
+
+	if pullSecret != "" {
+		pod.Spec.ImagePullSecrets = []core.LocalObjectReference{{Name: pullSecret}}
+	}
+
+	// Run the pod: Apply writes the definition file and calls kubectl apply on it
+	if err := Apply(
+		[]runtime.Object{&pod}, fmt.Sprintf(L("failed to run the %s pod"), name),
+	); err != nil {
+		return nil, err
+	}
+
+	// Make sure the pod is deleted even if waiting for it or reading its logs fails
+	defer func() {
+		if err := DeletePod(namespace, name, "-lapp="+name); err != nil {
+			log.Err(err).Msgf(L("failed to delete the %s pod"), name)
+		}
+	}()
+
+	if err := WaitForPod(namespace, name, 60); err != nil {
+		return nil, err
+	}
+
+	data, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "logs", "-n", namespace, name)
+	if err != nil {
+		return nil, utils.Errorf(err, L("failed to get the %s pod logs"), name)
+	}
+
+	return data, nil
+}
diff --git a/shared/kubernetes/pvc.go b/shared/kubernetes/pvc.go
new file mode 100644
index 
000000000..d0a3c8cb6
--- /dev/null
+++ b/shared/kubernetes/pvc.go
@@ -0,0 +1,264 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/rs/zerolog"
+	"github.com/rs/zerolog/log"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+// CreatePersistentVolumeClaims creates all the PVCs described by the mounts.
+func CreatePersistentVolumeClaims(
+	namespace string,
+	mounts []types.VolumeMount,
+) error {
+	pvcs := GetPersistentVolumeClaims(
+		namespace,
+		"",
+		core.ReadWriteOnce,
+		false,
+		GetLabels(ServerApp, ""),
+		mounts,
+	)
+
+	for _, pvc := range pvcs {
+		if !hasPersistentVolumeClaim(pvc.ObjectMeta.Namespace, pvc.ObjectMeta.Name) {
+			if err := Apply(
+				[]*core.PersistentVolumeClaim{pvc},
+				fmt.Sprintf(L("failed to create %s persistent volume claim"), pvc.ObjectMeta.Name),
+			); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func hasPersistentVolumeClaim(namespace string, name string) bool {
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "pvc", "-n", namespace, name, "-o", "name")
+	return err == nil && strings.TrimSpace(string(out)) != ""
+}
+
+// Contains the data extracted from the PV to create the linked PVC for it.
+type pvData struct {
+	ClaimRef struct {
+		Name      string
+		Namespace string
+	}
+	StorageClass string
+	AccessModes  []core.PersistentVolumeAccessMode
+	Size         string
+}
+
+// CreatePersistentVolumeClaimForVolume creates a PVC bound to a specific Volume.
+func CreatePersistentVolumeClaimForVolume(
+	namespace string,
+	volumeName string,
+) error {
+	// Get the PV Storage class and claimRef
+	out, err := utils.RunCmdOutput(zerolog.DebugLevel,
+		"kubectl", "get", "pv", volumeName, "-n", namespace,
+		"-o", `jsonpath={"{\"claimRef\": "}{.spec.claimRef}, "storageClass": "{.spec.storageClassName}", `+
+			`"accessModes": {.spec.accessModes}, "size": "{.spec.capacity.storage}{"\"}"}`,
+	)
+	if err != nil {
+		return err
+	}
+	var pv pvData
+	if err := json.Unmarshal(out, &pv); err != nil {
+		return utils.Errorf(err, L("failed to parse pv data"))
+	}
+
+	// Ensure the claimRef of the volume is for our PVC
+	if pv.ClaimRef.Name != volumeName || pv.ClaimRef.Namespace != namespace {
+		return fmt.Errorf(
+			L("the %[1]s volume should reference the %[2]s claim in %[3]s namespace"),
+			volumeName, volumeName, namespace,
+		)
+	}
+
+	// Create the PVC object
+	pvc := newPersistentVolumeClaim(namespace, volumeName, pv.StorageClass, pv.Size, pv.AccessModes, false)
+
+	return Apply([]runtime.Object{&pvc}, L("failed to create the persistent volume claim"))
+}
+
+// GetPersistentVolumeClaims creates the PVC objects matching a list of volume mounts.
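+//
+// Sketch, assuming mounts is a []types.VolumeMount in scope; the namespace and
+// storage class values are examples:
+//
+//	pvcs := GetPersistentVolumeClaims("uyuni", "local-path", core.ReadWriteOnce,
+//		false, GetLabels(ServerApp, ""), mounts)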
+func GetPersistentVolumeClaims(
+	namespace string,
+	storageClass string,
+	accessMode core.PersistentVolumeAccessMode,
+	matchPvByLabel bool,
+	labels map[string]string,
+	mounts []types.VolumeMount,
+) []*core.PersistentVolumeClaim {
+	var claims []*core.PersistentVolumeClaim
+
+	for _, mount := range mounts {
+		size := mount.Size
+		if size == "" {
+			log.Warn().Msgf(L("no size defined for PersistentVolumeClaim %s, using 10Mi as default"), mount.Name)
+			size = "10Mi"
+		}
+		pv := newPersistentVolumeClaim(
+			namespace,
+			mount.Name,
+			storageClass,
+			size,
+			[]core.PersistentVolumeAccessMode{accessMode},
+			matchPvByLabel,
+		)
+		pv.ObjectMeta.SetLabels(labels)
+		claims = append(claims, &pv)
+	}
+
+	return claims
+}
+
+// Creates a PVC from a few common values.
+func newPersistentVolumeClaim(
+	namespace string,
+	name string,
+	storageClass string,
+	size string,
+	accessModes []core.PersistentVolumeAccessMode,
+	matchPvByLabel bool,
+) core.PersistentVolumeClaim {
+	pvc := core.PersistentVolumeClaim{
+		TypeMeta: v1.TypeMeta{
+			APIVersion: "v1",
+			Kind:       "PersistentVolumeClaim",
+		},
+		ObjectMeta: v1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: core.PersistentVolumeClaimSpec{
+			AccessModes: accessModes,
+			Resources: core.VolumeResourceRequirements{
+				Requests: core.ResourceList{"storage": resource.MustParse(size)},
+			},
+		},
+	}
+
+	if storageClass != "" {
+		pvc.Spec.StorageClassName = &storageClass
+	}
+
+	if matchPvByLabel {
+		pvc.Spec.Selector = &v1.LabelSelector{
+			MatchLabels: map[string]string{"data": name},
+		}
+	}
+
+	return pvc
+}
+
+func createMount(mountPath string) core.VolumeMount {
+	pattern := regexp.MustCompile("[^a-zA-Z]+")
+	name := strings.Trim(pattern.ReplaceAllString(mountPath, "-"), "-")
+	return core.VolumeMount{
+		MountPath: mountPath,
+		Name:      name,
+	}
+}
+
+// CreateTmpfsMount creates a temporary volume and its mount.
+func CreateTmpfsMount(mountPath string, size string) (core.VolumeMount, core.Volume) {
+	mount := createMount(mountPath)
+
+	parsedSize := resource.MustParse(size)
+	volume := core.Volume{
+		Name: mount.Name,
+		VolumeSource: core.VolumeSource{
+			EmptyDir: &core.EmptyDirVolumeSource{
+				Medium:    core.StorageMediumMemory,
+				SizeLimit: &parsedSize,
+			},
+		},
+	}
+	return mount, volume
+}
+
+// CreateHostPathMount creates the mount and volume for a host path.
+// This is not secure and tied to the availability on the node, only use when needed.
+func CreateHostPathMount(
+	mountPath string,
+	hostPath string,
+	sourceType core.HostPathType,
+) (core.VolumeMount, core.Volume) {
+	mount := createMount(mountPath)
+
+	volume := core.Volume{
+		Name: mount.Name,
+		VolumeSource: core.VolumeSource{
+			HostPath: &core.HostPathVolumeSource{
+				Path: hostPath,
+				Type: &sourceType,
+			},
+		},
+	}
+	return mount, volume
+}
+
+// CreateSecretVolume creates the volume for a secret.
+func CreateSecretVolume(name string, secretName string) core.Volume {
+	volume := core.Volume{
+		Name: name,
+		VolumeSource: core.VolumeSource{
+			Secret: &core.SecretVolumeSource{
+				SecretName: secretName,
+			},
+		},
+	}
+
+	return volume
+}
+
+// CreateConfigVolume creates the volume for a ConfigMap.
+func CreateConfigVolume(name string, configMapName string) core.Volume {
+	volume := core.Volume{
+		Name: name,
+		VolumeSource: core.VolumeSource{
+			ConfigMap: &core.ConfigMapVolumeSource{
+				LocalObjectReference: core.LocalObjectReference{
+					Name: configMapName,
+				},
+			},
+		},
+	}
+
+	return volume
+}
+
+// CreateVolumes creates PVC-based volumes matching the internal volumes mounts.
+func CreateVolumes(mounts []types.VolumeMount) []core.Volume {
+	volumes := []core.Volume{}
+
+	for _, mount := range mounts {
+		volume := core.Volume{
+			Name: mount.Name,
+			VolumeSource: core.VolumeSource{
+				PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
+					ClaimName: mount.Name,
+				},
+			},
+		}
+		volumes = append(volumes, volume)
+	}
+
+	return volumes
+}
+
+var runCmdOutput = utils.RunCmdOutput
diff --git a/shared/kubernetes/pvc_test.go b/shared/kubernetes/pvc_test.go
new file mode 100644
index 000000000..cdd2ddff1
--- /dev/null
+++ b/shared/kubernetes/pvc_test.go
@@ -0,0 +1,34 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/uyuni-project/uyuni-tools/shared/testutils"
+)
+
+func TestHasPersistentVolumeClaim(t *testing.T) {
+	type dataType struct {
+		err      error
+		out      string
+		expected bool
+	}
+	data := []dataType{
+		{nil, "persistentvolumeclaim/var-pgsql\n", true},
+		{errors.New("PVC not found"), "", false},
+	}
+
+	for i, test := range data {
+		runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) {
+			return []byte(test.out), test.err
+		}
+		actual := hasPersistentVolumeClaim("myns", "thepvc")
+		testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected output", i), test.expected, actual)
+	}
+}
diff --git a/shared/kubernetes/utils.go b/shared/kubernetes/utils.go
index 34df9518f..5fa832db6 100644
--- a/shared/kubernetes/utils.go
+++ b/shared/kubernetes/utils.go
@@ -6,7 +6,6 @@ package kubernetes
 
 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"strconv"
 	"strings"
@@ -17,102 +16,86 @@ import (
 	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
 	"github.com/uyuni-project/uyuni-tools/shared/types"
 	"github.com/uyuni-project/uyuni-tools/shared/utils"
+	core "k8s.io/api/core/v1"
 )
 
-// ServerApp represent the server app name.
-const ServerApp = "uyuni"
+const (
+	// AppLabel is the app label name.
+	AppLabel = "app.kubernetes.io/part-of"
+	// ComponentLabel is the component label name.
+	ComponentLabel = "app.kubernetes.io/component"
+)
 
-// ServerFilter represents filter used to check server app.
-const ServerFilter = "-lapp=" + ServerApp
+const (
+	// ServerApp is the server app name.
+	ServerApp = "uyuni"
 
-// ProxyApp represnet the proxy app name.
-const ProxyApp = "uyuni-proxy"
+	// ProxyApp is the proxy app name.
+	ProxyApp = "uyuni-proxy"
+)
 
-// ProxyFilter represents filter used to check proxy app.
-const ProxyFilter = "-lapp=" + ProxyApp
+const (
+	// ServerComponent is the value of the component label for the server resources.
+	ServerComponent = "server"
+	// HubAPIComponent is the value of the component label for the Hub API resources.
+	HubAPIComponent = "hub-api"
+	// CocoComponent is the value of the component label for the confidential computing attestation resources.
+	CocoComponent = "coco"
+)
 
-// WaitForDeployment waits at most 60s for a kubernetes deployment to have at least one replica.
-// See [isDeploymentReady] for more details.
-func WaitForDeployment(namespace string, name string, appName string) error {
-	// Find the name of a replica pod
-	// Using the app label is a shortcut, not the 100% acurate way to get from deployment to pod
-	podName := ""
-	jsonpath := fmt.Sprintf("jsonpath={.items[?(@.metadata.labels.app==\"%s\")].metadata.name}", appName)
-	cmdArgs := []string{"get", "pod", "-o", jsonpath}
-	cmdArgs = addNamespace(cmdArgs, namespace)
+// ServerFilter represents filter used to check server app.
+const ServerFilter = "-l" + AppLabel + "=" + ServerApp
 
-	for i := 0; i < 60; i++ {
-		out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", cmdArgs...)
-		if err == nil {
-			podName = string(out)
-			break
-		}
-	}
+// ProxyFilter represents filter used to check proxy app.
+const ProxyFilter = "-l" + AppLabel + "=" + ProxyApp
 
-	// We need to wait for the image to be pulled as this can add quite some time
-	// Setting a timeout on this is very hard since it hightly depends on network speed and image size
-	// List the Pulled events from the pod as we may not see the Pulling if the image was already downloaded
-	err := WaitForPulledImage(namespace, podName)
-	if err != nil {
-		return utils.Errorf(err, L("failed to pull image"))
-	}
+// CaIssuerName is the name of the server CA issuer deployed if cert-manager is used.
+const CaIssuerName = "uyuni-ca-issuer"
 
-	log.Info().Msgf(L("Waiting for %[1]s deployment to be ready in %[2]s namespace\n"), name, namespace)
-	// Wait for a replica to be ready
-	for i := 0; i < 120; i++ {
-		// TODO Look for pod failures
-		if IsDeploymentReady(namespace, name) {
-			return nil
-		}
-		time.Sleep(1 * time.Second)
+// GetLabels creates the label map with the app and component.
+// The component label may be an empty string to skip it.
+func GetLabels(app string, component string) map[string]string {
+	labels := map[string]string{
+		AppLabel: app,
+	}
+	if component != "" {
+		labels[ComponentLabel] = component
 	}
-	return fmt.Errorf(
-		L("failed to find a ready replica for deployment %[1]s in namespace %[2]s after 120s"), name, namespace,
-	)
+	return labels
 }
 
-// WaitForPulledImage wait that image is pulled.
-func WaitForPulledImage(namespace string, podName string) error {
-	log.Info().Msgf(L("Waiting for image of %[1]s pod in %[2]s namespace to be pulled"), podName, namespace)
-	pulledArgs := []string{"get", "event",
-		"-o", "jsonpath={.items[?(@.reason==\"Pulled\")].message}",
-		"--field-selector", "involvedObject.name=" + podName}
-
-	pulledArgs = addNamespace(pulledArgs, namespace)
-	failedArgs := []string{"get", "event",
-		"-o", "jsonpath={range .items[?(@.reason==\"Failed\")]}{.message}{\"\\n\"}{end}",
-		"--field-selector", "involvedObject.name=" + podName}
-	failedArgs = addNamespace(failedArgs, namespace)
-	for {
-		// Look for events indicating an image pull issue
-		out, err := utils.RunCmdOutput(zerolog.TraceLevel, "kubectl", failedArgs...)
-		if err != nil {
-			return fmt.Errorf(L("failed to get failed events for pod %s"), podName)
-		}
-		lines := strings.Split(string(out), "\n")
-		for _, line := range lines {
-			if strings.HasPrefix(line, "Failed to pull image") {
-				return errors.New(L("failed to pull image"))
-			}
-		}
-
-		// Has the image pull finished?
-		out, err = utils.RunCmdOutput(zerolog.TraceLevel, "kubectl", pulledArgs...)
-		if err != nil {
-			return fmt.Errorf(L("failed to get events for pod %s"), podName)
-		}
-		if len(out) > 0 {
-			break
-		}
-		time.Sleep(1 * time.Second)
-	}
-	return nil
-}
-
+// WaitForDeployments waits for kubernetes deployments to have at least one ready replica.
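+//
+// Usage sketch; the namespace and deployment names are example values:
+//
+//	if err := WaitForDeployments("uyuni", "uyuni", "uyuni-hub-api"); err != nil {
+//		return err
+//	}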
+func WaitForDeployments(namespace string, names ...string) error {
+	log.Info().Msgf(
+		NL("Waiting for %[1]s deployment to be ready in %[2]s namespace\n",
+			"Waiting for %[1]s deployments to be ready in %[2]s namespace\n", len(names)),
+		strings.Join(names, ", "), namespace)
+
+	deploymentsStarting := names
+	// Wait forever for all deployments to be ready
+	for len(deploymentsStarting) > 0 {
+		starting := []string{}
+		for _, deploymentName := range deploymentsStarting {
+			ready, err := IsDeploymentReady(namespace, deploymentName)
+			if err != nil {
+				return err
+			}
+			if !ready {
+				starting = append(starting, deploymentName)
+			}
+		}
+		// Only update the pending list once the whole pass is done
+		deploymentsStarting = starting
+		if len(deploymentsStarting) > 0 {
+			time.Sleep(1 * time.Second)
+		}
+	}
+	return nil
+}
 
 // IsDeploymentReady returns true if a kubernetes deployment has at least one ready replica.
-// The name can also be a filter parameter like -lapp=uyuni.
+//
 // An empty namespace means searching through all the namespaces.
-func IsDeploymentReady(namespace string, name string) bool {
+func IsDeploymentReady(namespace string, name string) (bool, error) {
 	jsonpath := fmt.Sprintf("jsonpath={.items[?(@.metadata.name==\"%s\")].status.readyReplicas}", name)
 	args := []string{"get", "-o", jsonpath, "deploy"}
 	args = addNamespace(args, namespace)
@@ -121,10 +104,67 @@ func IsDeploymentReady(namespace string, name string) bool {
 	// kubectl errors out if the deployment or namespace doesn't exist
 	if err == nil {
 		if replicas, _ := strconv.Atoi(string(out)); replicas > 0 {
-			return true
+			return true, nil
+		}
+	}
+
+	// Search for the replica set matching the deployment
+	rsArgs := []string{
+		"get", "rs", "-o",
+		fmt.Sprintf("jsonpath={.items[?(@.metadata.ownerReferences[0].name=='%s')].metadata.name}", name),
+	}
+	rsArgs = addNamespace(rsArgs, namespace)
+	out, err = utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", rsArgs...)
+	if err != nil {
+		return false, utils.Errorf(err, L("failed to find ReplicaSet for deployment %s"), name)
+	}
+	rs := strings.TrimSpace(string(out))
+
+	// Check if all replica set pods have failed to start
+	jsonpath = fmt.Sprintf("jsonpath={.items[?(@.metadata.ownerReferences[0].name=='%s')].metadata.name}", rs)
+	podArgs := []string{"get", "pod", "-o", jsonpath}
+	podArgs = addNamespace(podArgs, namespace)
+	out, err = utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", podArgs...)
+	if err != nil {
+		return false, utils.Errorf(err, L("failed to find pods for %s deployment"), name)
+	}
+	pods := strings.Split(string(out), " ")
+	failedPods := 0
+	for _, podName := range pods {
+		if failed, err := isPodFailed(namespace, podName); err != nil {
+			return false, err
+		} else if failed {
+			failedPods = failedPods + 1
 		}
 	}
-	return false
+	if failedPods == len(pods) {
+		return false, fmt.Errorf(L("all the pods of %s deployment have a failure"), name)
+	}
+
+	return false, nil
+}
+
+// isPodFailed checks if any of the containers of the pod are in BackOff state.
+//
+// An empty namespace means searching through all the namespaces.
+func isPodFailed(namespace string, name string) (bool, error) {
+	// If a container failed to pull the image its status will have waiting.reason = ImagePullBackOff
+	// If a container crashed its status will have waiting.reason = CrashLoopBackOff
+	filter := fmt.Sprintf(".items[?(@.metadata.name==\"%s\")]", name)
+	jsonpath := fmt.Sprintf("jsonpath={%[1]s.status.containerStatuses[*].state.waiting.reason}"+
+		"{%[1]s.status.initContainerStatuses[*].state.waiting.reason}", filter)
+	// Let addNamespace set the namespace flag: an empty namespace means all namespaces
+	args := []string{"get", "pod", "-o", jsonpath}
+	args = addNamespace(args, namespace)
+
+	out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", args...)
+	if err != nil {
+		return true, utils.Errorf(err, L("failed to get the status of %s pod"), name)
+	}
+	statuses := string(out)
+	if strings.Contains(statuses, "CrashLoopBackOff") || strings.Contains(statuses, "ImagePullBackOff") {
+		return true, nil
+	}
+	return false, nil
}

// DeploymentStatus represents the kubernetes deployment status.
@@ -162,7 +202,7 @@ func ReplicasTo(namespace string, app string, replica uint) error {
 		return utils.Errorf(err, L("cannot run kubectl %s"), args)
 	}
 
-	pods, err := GetPods(namespace, "-lapp="+app)
+	pods, err := GetPods(namespace, "-l"+AppLabel+"="+app)
 	if err != nil {
 		return utils.Errorf(err, L("cannot get pods for %s"), app)
 	}
@@ -263,12 +303,12 @@ func addNamespace(args []string, namespace string) []string {
 	return args
 }
 
-// GetPullPolicy return pullpolicy in lower case, if exists.
-func GetPullPolicy(name string) string {
-	policies := map[string]string{
-		"always":       "Always",
-		"never":        "Never",
-		"ifnotpresent": "IfNotPresent",
+// GetPullPolicy returns the kubernetes PullPolicy value, if exists.
+func GetPullPolicy(name string) core.PullPolicy {
+	policies := map[string]core.PullPolicy{
+		"always":       core.PullAlways,
+		"never":        core.PullNever,
+		"ifnotpresent": core.PullIfNotPresent,
 	}
 	policy := policies[strings.ToLower(name)]
 	if policy == "" {
diff --git a/shared/kubernetes/waiters.go b/shared/kubernetes/waiters.go
new file mode 100644
index 000000000..4d280f0ba
--- /dev/null
+++ b/shared/kubernetes/waiters.go
@@ -0,0 +1,100 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/rs/zerolog"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+)
+
+// WaitForSecret waits for a secret to be available.
+func WaitForSecret(namespace string, secret string) {
+	for i := 0; ; i++ {
+		if err := utils.RunCmd("kubectl", "get", "-n", namespace, "secret", secret); err == nil {
+			break
+		}
+		time.Sleep(1 * time.Second)
+	}
+}
+
+// WaitForJob waits for a job to be completed before timeout seconds.
+//
+// If the timeout value is 0 the job will be waited for indefinitely.
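+//
+// Usage sketch, assuming jobName was returned when the job was created; the
+// namespace and timeout are example values:
+//
+//	if err := WaitForJob("uyuni", jobName, 120); err != nil {
+//		return err
+//	}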
+func WaitForJob(namespace string, name string, timeout int) error {
+	for i := 0; ; i++ {
+		status, err := jobStatus(namespace, name)
+		if err != nil {
+			return err
+		}
+		if status == "error" {
+			return fmt.Errorf(
+				L("%[1]s job failed, run kubectl logs -n %[2]s --tail=-1 -ljob-name=%[1]s for details"),
+				name, namespace,
+			)
+		}
+		if status == "success" {
+			return nil
+		}
+
+		if timeout > 0 && i == timeout {
+			return fmt.Errorf(L("%[1]s job failed to complete within %[2]d seconds"), name, timeout)
+		}
+		time.Sleep(1 * time.Second)
+	}
+}
+
+func jobStatus(namespace string, name string) (string, error) {
+	out, err := utils.RunCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "job", "-n", namespace, name,
+		"-o", "jsonpath={.status.succeeded},{.status.failed}",
+	)
+	if err != nil {
+		return "", utils.Errorf(err, L("failed to get %s job status"), name)
+	}
+	results := strings.SplitN(strings.TrimSpace(string(out)), ",", 2)
+	if len(results) != 2 {
+		return "", fmt.Errorf(L("invalid job status response: '%s'"), string(out))
+	}
+	if results[0] == "1" {
+		return "success", nil
+	} else if results[1] == "1" {
+		return "error", nil
+	}
+	return "", nil
+}
+
+// WaitForPod waits for a pod to complete before timeout seconds.
+//
+// If the timeout value is 0 the pod will be waited for indefinitely.
+func WaitForPod(namespace string, pod string, timeout int) error {
+	for i := 0; ; i++ {
+		out, err := utils.RunCmdOutput(
+			zerolog.DebugLevel, "kubectl", "get", "pod", "-n", namespace, pod,
+			"-o", "jsonpath={.status.containerStatuses[0].state.terminated.reason}",
+		)
+		if err != nil {
+			return utils.Errorf(err, L("failed to get %s pod status"), pod)
+		}
+		status := strings.TrimSpace(string(out))
+		if status != "" {
+			if status == "Completed" {
+				return nil
+			}
+			return fmt.Errorf(L("%[1]s pod failed with status %[2]s"), pod, status)
+		}
+
+		if timeout > 0 && i == timeout {
+			return fmt.Errorf(L("%[1]s pod failed to complete within %[2]d seconds"), pod, timeout)
+		}
+		time.Sleep(1 * time.Second)
+	}
+}
diff --git a/shared/ssl/ssl.go b/shared/ssl/ssl.go
index 730f4561d..cfc91b8d9 100644
--- a/shared/ssl/ssl.go
+++ b/shared/ssl/ssl.go
@@ -7,6 +7,7 @@ package ssl
 import (
 	"bytes"
 	"errors"
+	"io"
 	"os"
 	"os/exec"
 	"strings"
@@ -240,14 +241,22 @@ func optionalFile(file string) {
 	}
 }
 
-// GetRsaKey converts an SSL key to RSA.
-func GetRsaKey(keyPath string, password string) []byte {
+// GetRsaKey converts an SSL key content to RSA.
+func GetRsaKey(keyContent string, password string) []byte {
 	// Kubernetes only handles RSA private TLS keys, convert and strip password
 	caPassword := password
 	utils.AskPasswordIfMissing(&caPassword, L("Source server SSL CA private key password"), 0, 0)
 	// Convert the key file to RSA format for kubectl to handle it
-	cmd := exec.Command("openssl", "rsa", "-in", keyPath, "-passin", "env:pass")
+	cmd := exec.Command("openssl", "rsa", "-passin", "env:pass")
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		log.Fatal().Err(err).Msg(L("Failed to open openssl rsa process input stream"))
+	}
+	if _, err := io.WriteString(stdin, keyContent); err != nil {
+		log.Fatal().Err(err).Msg(L("Failed to write openssl key content to input stream"))
+	}
+	// Close the pipe so openssl sees the end of the input
+	_ = stdin.Close()
+
 	cmd.Env = append(cmd.Env, "pass="+caPassword)
 	out, err := cmd.Output()
 	if err != nil {
@@ -255,3 +264,20 @@
 	}
 	return out
 }
+
+// StripTextFromCertificate removes the optional text part of an x509 certificate.
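+//
+// Sketch, assuming certContent holds a PEM certificate that may start with a
+// textual dump:
+//
+//	pem := StripTextFromCertificate(certContent)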
+func StripTextFromCertificate(certContent string) []byte {
+	cmd := exec.Command("openssl", "x509")
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		log.Fatal().Err(err).Msg(L("Failed to open openssl x509 process input stream"))
+	}
+	if _, err := io.WriteString(stdin, certContent); err != nil {
+		log.Fatal().Err(err).Msg(L("Failed to write SSL certificate to input stream"))
+	}
+	// Close the stream so that openssl gets an EOF and doesn't wait for more input
+	if err := stdin.Close(); err != nil {
+		log.Fatal().Err(err).Msg(L("Failed to close SSL certificate input stream"))
+	}
+	out, err := cmd.Output()
+	if err != nil {
+		log.Fatal().Err(err).Msg(L("failed to strip text part from CA certificate"))
+	}
+	return out
+}
diff --git a/shared/ssl/ssl_test.go b/shared/ssl/ssl_test.go
index b5d055f93..1d0b5342f 100644
--- a/shared/ssl/ssl_test.go
+++ b/shared/ssl/ssl_test.go
@@ -152,7 +152,8 @@ func TestOrderCasChain2(t *testing.T) {
 }
 
 func TestGetRsaKey(t *testing.T) {
-	actual := string(GetRsaKey("testdata/RootCA.key", "secret"))
+	key := testutils.ReadFile(t, "testdata/RootCA.key")
+	actual := string(GetRsaKey(key, "secret"))
 	if !strings.HasPrefix(actual, "-----BEGIN PRIVATE KEY-----\nMIIEugIBADANBgkqhkiG9w0BAQEFAAS") ||
 		!strings.HasSuffix(actual, "DKY9SmW6QD+RJwbMc4M=\n-----END PRIVATE KEY-----\n") {
 		t.Errorf("Unexpected generated RSA key: %s", actual)
diff --git a/shared/testutils/flagstests/mgradm.go b/shared/testutils/flagstests/mgradm.go
index d0b5ee6d0..416ae85e3 100644
--- a/shared/testutils/flagstests/mgradm.go
+++ b/shared/testutils/flagstests/mgradm.go
@@ -42,6 +42,34 @@ func AssertServerHelmFlags(t *testing.T, flags *utils.HelmFlags) {
 	)
 }
 
+// VolumesFlagsTestExpected is the expected values for AssertVolumesFlags.
+var VolumesFlagsTestExpected = []string{
+	"--volumes-class", "MyStorageClass",
+	"--volumes-mirror", "mirror-pv",
+	"--volumes-database-size", "123Gi",
+	"--volumes-database-class", "dbclass",
+	"--volumes-packages-size", "456Gi",
+	"--volumes-packages-class", "pkgclass",
+	"--volumes-www-size", "123Mi",
+	"--volumes-www-class", "wwwclass",
+	"--volumes-cache-size", "789Gi",
+	"--volumes-cache-class", "cacheclass",
+}
+
+// AssertVolumesFlags checks that all the volumes flags are parsed correctly.
+func AssertVolumesFlags(t *testing.T, flags *utils.VolumesFlags) {
+	testutils.AssertEquals(t, "Error parsing --volumes-class", "MyStorageClass", flags.Class)
+	testutils.AssertEquals(t, "Error parsing --volumes-mirror", "mirror-pv", flags.Mirror)
+	testutils.AssertEquals(t, "Error parsing --volumes-database-size", "123Gi", flags.Database.Size)
+	testutils.AssertEquals(t, "Error parsing --volumes-database-class", "dbclass", flags.Database.Class)
+	testutils.AssertEquals(t, "Error parsing --volumes-packages-size", "456Gi", flags.Packages.Size)
+	testutils.AssertEquals(t, "Error parsing --volumes-packages-class", "pkgclass", flags.Packages.Class)
+	testutils.AssertEquals(t, "Error parsing --volumes-www-size", "123Mi", flags.Www.Size)
+	testutils.AssertEquals(t, "Error parsing --volumes-www-class", "wwwclass", flags.Www.Class)
+	testutils.AssertEquals(t, "Error parsing --volumes-cache-size", "789Gi", flags.Cache.Size)
+	testutils.AssertEquals(t, "Error parsing --volumes-cache-class", "cacheclass", flags.Cache.Class)
+}
+
 // ImageFlagsTestArgs is the expected values for AssertImageFlag.
 var ImageFlagsTestArgs = []string{
 	"--image", "path/to/image",
@@ -58,7 +86,7 @@ func AssertImageFlag(t *testing.T, flags *types.ImageFlags) {
 	testutils.AssertEquals(t, "Error parsing --pullPolicy", "never", flags.PullPolicy)
 }
 
-// DBUpdateImageFlagTestArgs is the expected values for AssertDbUpgradeImageFlag.
+// DBUpdateImageFlagTestArgs is the expected values for AssertDBUpgradeImageFlag.
var DBUpdateImageFlagTestArgs = []string{
	"--dbupgrade-image", "dbupgradeimg",
	"--dbupgrade-tag", "dbupgradetag",
diff --git a/shared/types/deployment.go b/shared/types/deployment.go
index c9563168e..164725d21 100644
--- a/shared/types/deployment.go
+++ b/shared/types/deployment.go
@@ -7,6 +7,8 @@ package types
 type VolumeMount struct {
 	MountPath string `json:"mountPath,omitempty"`
 	Name      string `json:"name,omitempty"`
+	Size      string `json:"size,omitempty"`
+	Class     string `json:"class,omitempty"`
 }
 
 // Container type used for mapping pod definition structure.
diff --git a/shared/utils/exec.go b/shared/utils/exec.go
index 574cfc336..bed607f40 100644
--- a/shared/utils/exec.go
+++ b/shared/utils/exec.go
@@ -5,6 +5,8 @@
 package utils
 
 import (
+	"bytes"
+	"errors"
 	"fmt"
 	"os/exec"
 	"strings"
@@ -64,11 +66,21 @@ func RunCmdOutput(logLevel zerolog.Level, command string, args ...string) ([]byt
 		s.Start() // Start the spinner
 	}
 	localLogger.Debug().Msgf("Running: %s %s", command, strings.Join(args, " "))
-	output, err := exec.Command(command, args...).Output()
+	cmd := exec.Command(command, args...)
+	var errBuf bytes.Buffer
+	cmd.Stderr = &errBuf
+	output, err := cmd.Output()
 	if logLevel != zerolog.Disabled {
 		s.Stop()
 	}
 	localLogger.Trace().Msgf("Command output: %s, error: %s", output, err)
+	if err != nil {
+		// Only pass the stderr content to the caller when the command failed: successful commands may write warnings there too
+		message := strings.TrimSpace(errBuf.String())
+		if message != "" {
+			err = errors.New(message)
+		}
+	}
 	return output, err
 }
 
diff --git a/shared/utils/inspector.go b/shared/utils/inspector.go
index 5e4de9da9..c446466a7 100644
--- a/shared/utils/inspector.go
+++ b/shared/utils/inspector.go
@@ -83,6 +83,13 @@ func ReadInspectData[T any](dataFile string) (*T, error) {
 		return nil, Errorf(err, L("cannot read file %s"), dataFile)
 	}
 
+	return ReadInspectDataString[T](data)
+}
+
+// ReadInspectDataString returns an unmarshalled object of type T from the raw inspection data.
+//
+// This function is most likely to be used for the implementation of the inspectors, but can also be used directly.
+func ReadInspectDataString[T any](data []byte) (*T, error) {
 	viper.SetConfigType("env")
 	if err := viper.MergeConfig(bytes.NewBuffer(data)); err != nil {
 		return nil, Errorf(err, L("cannot read config"))
diff --git a/shared/utils/ports.go b/shared/utils/ports.go
index 6adc0b5d3..01b2e16a5 100644
--- a/shared/utils/ports.go
+++ b/shared/utils/ports.go
@@ -4,19 +4,39 @@
 
 package utils
 
-import "github.com/uyuni-project/uyuni-tools/shared/types"
-
-// ServerTCPServiceName is the name of the server TCP service.
-const ServerTCPServiceName = "uyuni-tcp"
-
-// ServerUDPServiceName is the name of the server UDP service.
-const ServerUDPServiceName = "uyuni-udp"
-
-// ProxyTCPServiceName is the name of the proxy TCP service.
-const ProxyTCPServiceName = "uyuni-proxy-tcp"
-
-// ProxyUDPServiceName is the name of the proxy UDP service.
-const ProxyUDPServiceName = "uyuni-proxy-udp"
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+)
+
+const (
+	// WebServiceName is the name of the server web service.
+	WebServiceName = "web"
+	// SaltServiceName is the name of the server salt service.
+	SaltServiceName = "salt"
+	// CobblerServiceName is the name of the server cobbler service.
+	CobblerServiceName = "cobbler"
+	// ReportdbServiceName is the name of the server report database service.
+	ReportdbServiceName = "reportdb"
+	// DBServiceName is the name of the server internal database service.
+	DBServiceName = "db"
+	// TaskoServiceName is the name of the server taskomatic service.
+ TaskoServiceName = "taskomatic" + // TftpServiceName is the name of the server tftp service. + TftpServiceName = "tftp" + // TomcatServiceName is the name of the server tomcat service. + TomcatServiceName = "tomcat" + // SearchServiceName is the name of the server search service. + SearchServiceName = "search" + + // HubAPIServiceName is the name of the server hub API service. + HubAPIServiceName = "hub-api" + + // ProxyTCPServiceName is the name of the proxy TCP service. + ProxyTCPServiceName = "uyuni-proxy-tcp" + + // ProxyUDPServiceName is the name of the proxy UDP service. + ProxyUDPServiceName = "uyuni-proxy-udp" +) // NewPortMap is a constructor for PortMap type. func NewPortMap(service string, name string, exposed int, port int) types.PortMap { @@ -30,48 +50,54 @@ func NewPortMap(service string, name string, exposed int, port int) types.PortMa // WebPorts is the list of ports for the server web service. var WebPorts = []types.PortMap{ - NewPortMap(ServerTCPServiceName, "http", 80, 80), + NewPortMap(WebServiceName, "http", 80, 80), +} + +// ReportDBPorts is the list of ports for the server report db service. +var ReportDBPorts = []types.PortMap{ + NewPortMap(ReportdbServiceName, "pgsql", 5432, 5432), + NewPortMap(ReportdbServiceName, "exporter", 9187, 9187), } -// PgsqlPorts is the list of ports for the server report db service. -var PgsqlPorts = []types.PortMap{ - NewPortMap(ServerTCPServiceName, "pgsql", 5432, 5432), - NewPortMap(ServerTCPServiceName, "exporter", 9187, 9187), +// DBPorts is the list of ports for the server internal db service. +var DBPorts = []types.PortMap{ + NewPortMap(DBServiceName, "pgsql", 5432, 5432), + NewPortMap(DBServiceName, "exporter", 9187, 9187), } // SaltPorts is the list of ports for the server salt service. var SaltPorts = []types.PortMap{ - NewPortMap(ServerTCPServiceName, "publish", 4505, 4505), - NewPortMap(ServerTCPServiceName, "request", 4506, 4506), + NewPortMap(SaltServiceName, "publish", 4505, 4505), + NewPortMap(SaltServiceName, "request", 4506, 4506), } // CobblerPorts is the list of ports for the server cobbler service. var CobblerPorts = []types.PortMap{ - NewPortMap(ServerTCPServiceName, "cobbler", 25151, 25151), + NewPortMap(CobblerServiceName, "cobbler", 25151, 25151), } // TaskoPorts is the list of ports for the server taskomatic service. var TaskoPorts = []types.PortMap{ - NewPortMap(ServerTCPServiceName, "jmx", 5556, 5556), - NewPortMap(ServerTCPServiceName, "mtrx", 9800, 9800), - NewPortMap(ServerTCPServiceName, "debug", 8001, 8001), + NewPortMap(TaskoServiceName, "jmx", 5556, 5556), + NewPortMap(TaskoServiceName, "mtrx", 9800, 9800), + NewPortMap(TaskoServiceName, "debug", 8001, 8001), } // TomcatPorts is the list of ports for the server tomcat service. var TomcatPorts = []types.PortMap{ - NewPortMap(ServerTCPServiceName, "jmx", 5557, 5557), - NewPortMap(ServerTCPServiceName, "debug", 8003, 8003), + NewPortMap(TomcatServiceName, "jmx", 5557, 5557), + NewPortMap(TomcatServiceName, "debug", 8003, 8003), } // SearchPorts is the list of ports for the server search service. var SearchPorts = []types.PortMap{ - NewPortMap(ServerTCPServiceName, "debug", 8002, 8002), + NewPortMap(SearchServiceName, "debug", 8002, 8002), } // TftpPorts is the list of ports for the server tftp service. 
var TftpPorts = []types.PortMap{ { - Service: ServerUDPServiceName, + Service: TftpServiceName, Name: "tftp", Exposed: 69, Port: 69, @@ -85,7 +111,7 @@ var TftpPorts = []types.PortMap{ func GetServerPorts(debug bool) []types.PortMap { ports := []types.PortMap{} ports = appendPorts(ports, debug, WebPorts...) - ports = appendPorts(ports, debug, PgsqlPorts...) + ports = appendPorts(ports, debug, ReportDBPorts...) ports = appendPorts(ports, debug, SaltPorts...) ports = appendPorts(ports, debug, CobblerPorts...) ports = appendPorts(ports, debug, TaskoPorts...) @@ -113,7 +139,7 @@ var TCPPodmanPorts = []types.PortMap{ // HubXmlrpcPorts are the tcp ports required by the Hub XMLRPC API service. var HubXmlrpcPorts = []types.PortMap{ - NewPortMap(ServerTCPServiceName, "xmlrpc", 2830, 2830), + NewPortMap(HubAPIServiceName, "xmlrpc", 2830, 2830), } // ProxyTCPPorts are the tcp ports required by the proxy. diff --git a/shared/utils/ports_test.go b/shared/utils/ports_test.go index fec5fa807..c5f1af20f 100644 --- a/shared/utils/ports_test.go +++ b/shared/utils/ports_test.go @@ -11,7 +11,7 @@ import ( ) func TestGetServerPorts(t *testing.T) { - allPorts := len(WebPorts) + len(PgsqlPorts) + len(SaltPorts) + len(CobblerPorts) + + allPorts := len(WebPorts) + len(ReportDBPorts) + len(SaltPorts) + len(CobblerPorts) + len(TaskoPorts) + len(TomcatPorts) + len(SearchPorts) + len(TftpPorts) ports := GetServerPorts(false) diff --git a/shared/utils/utils.go b/shared/utils/utils.go index 968afb255..0f2957839 100644 --- a/shared/utils/utils.go +++ b/shared/utils/utils.go @@ -46,6 +46,7 @@ type InspectResult struct { CommonInspectData `mapstructure:",squash"` Timezone string HasHubXmlrpcAPI bool `mapstructure:"has_hubxmlrpc"` + Debug bool `mapstructure:"debug"` } func checkValueSize(value string, min int, max int) bool { diff --git a/shared/utils/volumes.go b/shared/utils/volumes.go index 4181a8bda..c0aefe242 100644 --- a/shared/utils/volumes.go +++ b/shared/utils/volumes.go @@ -8,9 +8,9 @@ import "github.com/uyuni-project/uyuni-tools/shared/types" // PgsqlRequiredVolumeMounts represents volumes mount used by PostgreSQL. var PgsqlRequiredVolumeMounts = []types.VolumeMount{ - {MountPath: "/etc/pki/tls", Name: "etc-tls"}, - {MountPath: "/var/lib/pgsql", Name: "var-pgsql"}, - {MountPath: "/etc/rhn", Name: "etc-rhn"}, + {MountPath: "/etc/pki/tls", Name: "etc-tls", Size: "1Mi"}, + {MountPath: "/var/lib/pgsql", Name: "var-pgsql", Size: "50Gi"}, + {MountPath: "/etc/rhn", Name: "etc-rhn", Size: "1Mi"}, {MountPath: "/etc/pki/spacewalk-tls", Name: "tls-key"}, } @@ -29,18 +29,17 @@ var PgsqlRequiredVolumes = []types.Volume{ }, } -// EtcServerVolumeMounts represents volumes mounted in /etc folder. -var EtcServerVolumeMounts = []types.VolumeMount{ - {MountPath: "/etc/apache2", Name: "etc-apache2"}, - {MountPath: "/etc/systemd/system/multi-user.target.wants", Name: "etc-systemd-multi"}, - {MountPath: "/etc/systemd/system/sockets.target.wants", Name: "etc-systemd-sockets"}, - {MountPath: "/etc/salt", Name: "etc-salt"}, - {MountPath: "/etc/rhn", Name: "etc-rhn"}, - {MountPath: "/etc/tomcat", Name: "etc-tomcat"}, - {MountPath: "/etc/cobbler", Name: "etc-cobbler"}, - {MountPath: "/etc/sysconfig", Name: "etc-sysconfig"}, - {MountPath: "/etc/postfix", Name: "etc-postfix"}, - {MountPath: "/etc/sssd", Name: "etc-sssd"}, +// etcServerVolumeMounts represents volumes mounted in /etc folder. 
+var etcServerVolumeMounts = []types.VolumeMount{ + {MountPath: "/etc/apache2", Name: "etc-apache2", Size: "1Mi"}, + {MountPath: "/etc/systemd/system/multi-user.target.wants", Name: "etc-systemd-multi", Size: "1Mi"}, + {MountPath: "/etc/systemd/system/sockets.target.wants", Name: "etc-systemd-sockets", Size: "1Mi"}, + {MountPath: "/etc/salt", Name: "etc-salt", Size: "1Mi"}, + {MountPath: "/etc/tomcat", Name: "etc-tomcat", Size: "1Mi"}, + {MountPath: "/etc/cobbler", Name: "etc-cobbler", Size: "1Mi"}, + {MountPath: "/etc/sysconfig", Name: "etc-sysconfig", Size: "20Mi"}, + {MountPath: "/etc/postfix", Name: "etc-postfix", Size: "1Mi"}, + {MountPath: "/etc/sssd", Name: "etc-sssd", Size: "1Mi"}, } // EtcServerVolumes represents volumes used for configuration. @@ -57,27 +56,27 @@ var EtcServerVolumes = []types.Volume{ {Name: "etc-sssd", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-sssd"}}, } -var etcAndPgsqlVolumeMounts = append(PgsqlRequiredVolumeMounts, EtcServerVolumeMounts[:]...) +var etcAndPgsqlVolumeMounts = append(PgsqlRequiredVolumeMounts, etcServerVolumeMounts[:]...) var etcAndPgsqlVolumes = append(PgsqlRequiredVolumes, EtcServerVolumes[:]...) // ServerVolumeMounts should match the volumes mapping from the container definition in both // the helm chart and the systemctl services definitions. var ServerVolumeMounts = append([]types.VolumeMount{ - {MountPath: "/var/lib/cobbler", Name: "var-cobbler"}, - {MountPath: "/var/lib/rhn/search", Name: "var-search"}, - {MountPath: "/var/lib/salt", Name: "var-salt"}, - {MountPath: "/var/cache", Name: "var-cache"}, - {MountPath: "/var/spacewalk", Name: "var-spacewalk"}, - {MountPath: "/var/log", Name: "var-log"}, - {MountPath: "/srv/salt", Name: "srv-salt"}, - {MountPath: "/srv/www/", Name: "srv-www"}, - {MountPath: "/srv/tftpboot", Name: "srv-tftpboot"}, - {MountPath: "/srv/formula_metadata", Name: "srv-formulametadata"}, - {MountPath: "/srv/pillar", Name: "srv-pillar"}, - {MountPath: "/srv/susemanager", Name: "srv-susemanager"}, - {MountPath: "/srv/spacewalk", Name: "srv-spacewalk"}, - {MountPath: "/root", Name: "root"}, - {MountPath: "/etc/pki/trust/anchors", Name: "ca-cert"}, + {MountPath: "/var/lib/cobbler", Name: "var-cobbler", Size: "10Mi"}, + {MountPath: "/var/lib/rhn/search", Name: "var-search", Size: "10Gi"}, + {MountPath: "/var/lib/salt", Name: "var-salt", Size: "10Mi"}, + {MountPath: "/var/cache", Name: "var-cache", Size: "10Gi"}, + {MountPath: "/var/spacewalk", Name: "var-spacewalk", Size: "100Gi"}, + {MountPath: "/var/log", Name: "var-log", Size: "2Gi"}, + {MountPath: "/srv/salt", Name: "srv-salt", Size: "10Mi"}, + {MountPath: "/srv/www/", Name: "srv-www", Size: "100Gi"}, + {MountPath: "/srv/tftpboot", Name: "srv-tftpboot", Size: "300Mi"}, + {MountPath: "/srv/formula_metadata", Name: "srv-formulametadata", Size: "10Mi"}, + {MountPath: "/srv/pillar", Name: "srv-pillar", Size: "10Mi"}, + {MountPath: "/srv/susemanager", Name: "srv-susemanager", Size: "1Mi"}, + {MountPath: "/srv/spacewalk", Name: "srv-spacewalk", Size: "10Mi"}, + {MountPath: "/root", Name: "root", Size: "1Mi"}, + {MountPath: "/etc/pki/trust/anchors/", Name: "ca-cert"}, }, etcAndPgsqlVolumeMounts[:]...) // ServerVolumes match the volume with Persistent Volume Claim. 
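Note on the Size and Class fields introduced on types.VolumeMount above: they are sizing hints consumed when building the PersistentVolumeClaims backing each mount. The helper below is an illustrative sketch of that mapping only, it is not part of this patch series, and with k8s.io/api releases older than v0.29 the Resources field is core.ResourceRequirements instead of core.VolumeResourceRequirements:

package kubernetes

import (
	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/uyuni-project/uyuni-tools/shared/types"
)

// pvcFromMount is a hypothetical helper building a PersistentVolumeClaim
// from a VolumeMount carrying Size and Class hints. It assumes Size is set.
func pvcFromMount(namespace string, mount types.VolumeMount) *core.PersistentVolumeClaim {
	pvc := core.PersistentVolumeClaim{
		ObjectMeta: meta.ObjectMeta{Namespace: namespace, Name: mount.Name},
		Spec: core.PersistentVolumeClaimSpec{
			AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
			Resources: core.VolumeResourceRequirements{
				Requests: core.ResourceList{
					core.ResourceStorage: resource.MustParse(mount.Size),
				},
			},
		},
	}
	if mount.Class != "" {
		// An empty Class keeps the cluster's default storage class.
		class := mount.Class
		pvc.Spec.StorageClassName = &class
	}
	return &pvc
}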
From c65750d1c5109901674ba2be2f2225d896cd860b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Wed, 9 Oct 2024 18:10:32 +0200
Subject: [PATCH 11/19] Move the RunSetup function to shared

In order to share the same code for installation, migration and
upgrade, the RunSetup() function needs to move to the mgradm shared
utils module.
---
 mgradm/cmd/install/kubernetes/utils.go                 | 3 +--
 mgradm/cmd/install/podman/utils.go                     | 4 ++--
 .../install/shared/shared.go => shared/utils/setup.go} | 9 ++++-----
 3 files changed, 7 insertions(+), 9 deletions(-)
 rename mgradm/{cmd/install/shared/shared.go => shared/utils/setup.go} (94%)

diff --git a/mgradm/cmd/install/kubernetes/utils.go b/mgradm/cmd/install/kubernetes/utils.go
index d44a60448..52dba03e7 100644
--- a/mgradm/cmd/install/kubernetes/utils.go
+++ b/mgradm/cmd/install/kubernetes/utils.go
@@ -13,7 +13,6 @@ import (
 	"github.com/rs/zerolog"
 	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
-	install_shared "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared"
 	"github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes"
 	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
 	"github.com/uyuni-project/uyuni-tools/shared"
@@ -96,7 +95,7 @@ func installForKubernetes(
 		"NO_SSL": "Y",
 	}
 
-	if err := install_shared.RunSetup(cnx, &flags.ServerFlags, args[0], envs); err != nil {
+	if err := adm_utils.RunSetup(cnx, &flags.ServerFlags, args[0], envs); err != nil {
 		namespace, err := cnx.GetNamespace("")
 		if err != nil {
 			return shared_utils.Errorf(err, L("failed to stop service"))
diff --git a/mgradm/cmd/install/podman/utils.go b/mgradm/cmd/install/podman/utils.go
index 8801a380f..c8b83f92f 100644
--- a/mgradm/cmd/install/podman/utils.go
+++ b/mgradm/cmd/install/podman/utils.go
@@ -12,10 +12,10 @@ import (
 	"github.com/rs/zerolog"
 	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
-	install_shared "github.com/uyuni-project/uyuni-tools/mgradm/cmd/install/shared"
 	"github.com/uyuni-project/uyuni-tools/mgradm/shared/coco"
 	"github.com/uyuni-project/uyuni-tools/mgradm/shared/hub"
 	"github.com/uyuni-project/uyuni-tools/mgradm/shared/podman"
+	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
 	"github.com/uyuni-project/uyuni-tools/shared"
 	. 
"github.com/uyuni-project/uyuni-tools/shared/l10n" shared_podman "github.com/uyuni-project/uyuni-tools/shared/podman" @@ -114,7 +114,7 @@ func installForPodman( log.Info().Msg(L("Run setup command in the container")) - if err := install_shared.RunSetup(cnx, &flags.ServerFlags, fqdn, env); err != nil { + if err := adm_utils.RunSetup(cnx, &flags.ServerFlags, fqdn, env); err != nil { if stopErr := systemd.StopService(shared_podman.ServerService); stopErr != nil { log.Error().Msgf(L("Failed to stop service: %v"), stopErr) } diff --git a/mgradm/cmd/install/shared/shared.go b/mgradm/shared/utils/setup.go similarity index 94% rename from mgradm/cmd/install/shared/shared.go rename to mgradm/shared/utils/setup.go index 261619b54..a7fb5a534 100644 --- a/mgradm/cmd/install/shared/shared.go +++ b/mgradm/shared/utils/setup.go @@ -2,7 +2,7 @@ // // SPDX-License-Identifier: Apache-2.0 -package shared +package utils import ( "errors" @@ -13,7 +13,6 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" - adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" "github.com/uyuni-project/uyuni-tools/shared" "github.com/uyuni-project/uyuni-tools/shared/api" "github.com/uyuni-project/uyuni-tools/shared/api/org" @@ -24,7 +23,7 @@ import ( const setupName = "setup.sh" // RunSetup execute the setup. -func RunSetup(cnx *shared.Connection, flags *adm_utils.ServerFlags, fqdn string, env map[string]string) error { +func RunSetup(cnx *shared.Connection, flags *ServerFlags, fqdn string, env map[string]string) error { // Containers should be running now, check storage if it is using volume from already configured server preconfigured := false if isServerConfigured(cnx) { @@ -44,7 +43,7 @@ func RunSetup(cnx *shared.Connection, flags *adm_utils.ServerFlags, fqdn string, return utils.Errorf(err, L("cannot copy /tmp/setup.sh")) } - err = adm_utils.ExecCommand(zerolog.InfoLevel, cnx, "/tmp/setup.sh") + err = ExecCommand(zerolog.InfoLevel, cnx, "/tmp/setup.sh") if err != nil && !preconfigured { return utils.Errorf(err, L("error running the setup script")) } @@ -105,7 +104,7 @@ func RunSetup(cnx *shared.Connection, flags *adm_utils.ServerFlags, fqdn string, // The script exports all the needed environment variables and calls uyuni's mgr-setup. // Podman or kubernetes-specific variables can be passed using extraEnv parameter. func generateSetupScript( - flags *adm_utils.InstallationFlags, + flags *InstallationFlags, fqdn string, mirror string, extraEnv map[string]string, From d959a4d386cc8ae9a85d3afad0384893109ef219 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Mon, 30 Sep 2024 13:35:02 +0200 Subject: [PATCH 12/19] Refactor kubernetes uninstall Remove all server resources without relying on the helm chart. 
--- mgradm/cmd/uninstall/kubernetes.go | 51 ++++++++++++------------------ shared/kubernetes/kubernetes.go | 8 +++++ 2 files changed, 28 insertions(+), 31 deletions(-) diff --git a/mgradm/cmd/uninstall/kubernetes.go b/mgradm/cmd/uninstall/kubernetes.go index 36816c666..5b24aa61c 100644 --- a/mgradm/cmd/uninstall/kubernetes.go +++ b/mgradm/cmd/uninstall/kubernetes.go @@ -7,9 +7,8 @@ package uninstall import ( - "fmt" + "strings" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "github.com/uyuni-project/uyuni-tools/shared" @@ -46,38 +45,26 @@ func uninstallForKubernetes( if err != nil { return err } - if err := kubernetes.HelmUninstall(serverNamespace, kubeconfig, kubernetes.ServerApp, !flags.Force); err != nil { - return err - } - // Remove the remaining configmap and secrets + // Remove all Uyuni resources if serverNamespace != "" { - _, err := utils.RunCmdOutput(zerolog.TraceLevel, "kubectl", "-n", serverNamespace, "get", "secret", "uyuni-ca") - caSecret := "uyuni-ca" - if err != nil { - caSecret = "" + objects := "job,deploy,svc,ingress,pvc,cm,secret" + if kubernetes.HasResource("ingressroutetcps") { + objects += ",middlewares,ingressroutetcps,ingressrouteudps" } + if kubernetes.HasResource("issuers") { + objects += ",issuers,certificates" + } + deleteCmd := []string{ + "kubectl", "delete", "-n", serverNamespace, objects, + "-l", kubernetes.AppLabel + "=" + kubernetes.ServerApp, + } if !flags.Force { - log.Info().Msgf(L("Would run %s"), fmt.Sprintf("kubectl delete -n %s configmap uyuni-ca", serverNamespace)) - log.Info().Msgf(L("Would run %s"), - fmt.Sprintf("kubectl delete -n %s secret uyuni-cert %s", serverNamespace, caSecret), - ) + log.Info().Msgf(L("Would run %s"), strings.Join(deleteCmd, " ")) } else { - log.Info().Msgf(L("Running %s"), fmt.Sprintf("kubectl delete -n %s configmap uyuni-ca", serverNamespace)) - if err := utils.RunCmd("kubectl", "delete", "-n", serverNamespace, "configmap", "uyuni-ca"); err != nil { - log.Info().Err(err).Msgf(L("Failed deleting config map")) - } - - log.Info().Msgf(L("Running %s"), fmt.Sprintf("kubectl delete -n %s secret uyuni-cert %s", serverNamespace, caSecret)) - - args := []string{"delete", "-n", serverNamespace, "secret", "uyuni-cert"} - if caSecret != "" { - args = append(args, caSecret) - } - err := utils.RunCmd("kubectl", args...) 
-			if err != nil {
-				log.Info().Err(err).Msgf(L("Failed deleting secret"))
+			if err := utils.RunCmd(deleteCmd[0], deleteCmd[1:]...); err != nil {
+				return utils.Errorf(err, L("failed to delete server resources"))
 			}
 		}
 	}
@@ -87,15 +74,17 @@
 	// Since some storage plugins don't handle Delete policy, we may need to check for error events to avoid infinite loop
 
 	// Uninstall cert-manager if we installed it
-	certManagerConnection := shared.NewConnection("kubectl", "", "")
+	certManagerConnection := shared.NewConnection("kubectl", "", "-linstalledby=mgradm")
 	// TODO: re-add "-linstalledby=mgradm" filter once the label is added in helm release
 	// mgradm/shared/kubernetes/certificates.go:124 was supposed to be addressing it
 	certManagerNamespace, err := certManagerConnection.GetNamespace("cert-manager")
 	if err != nil {
 		return err
 	}
-	if err := kubernetes.HelmUninstall(certManagerNamespace, kubeconfig, "cert-manager", !flags.Force); err != nil {
-		return err
+	if certManagerNamespace != "" {
+		if err := kubernetes.HelmUninstall(certManagerNamespace, kubeconfig, "cert-manager", !flags.Force); err != nil {
+			return err
+		}
 	}
 
 	// Remove the K3s Traefik config
diff --git a/shared/kubernetes/kubernetes.go b/shared/kubernetes/kubernetes.go
index 65dda8fcc..fa2986f66 100644
--- a/shared/kubernetes/kubernetes.go
+++ b/shared/kubernetes/kubernetes.go
@@ -224,3 +224,11 @@ func GetDeploymentImagePullSecret(namespace string, filter string) (string, erro
 	return strings.TrimSpace(string(out)), nil
 }
+
+// HasResource checks if a resource is available on the cluster.
+func HasResource(name string) bool {
+	if err := utils.RunCmd("kubectl", "explain", name); err != nil {
+		return false
+	}
+	return true
+}
From f080d2ad9cc601d95efe4fc1aa929cc84f17d88e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Tue, 24 Sep 2024 14:41:59 +0200
Subject: [PATCH 13/19] Merge kubernetes install/upgrade/migrate into one entry point

Refactor the server upgrade and install so they no longer need the helm
chart, as initiated for the migration, and merge all that logic into a
single Reconcile() function to avoid redundancy.

Merging the code into a single function will also help figure out how
to implement an operator in the future.
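For orientation, here is a condensed sketch of the shape the merged
entry point takes. It is illustrative only: the real reconcile.go also
handles SSL certificates, DB and SCC secrets, ingresses, node
configuration, the coco and hub API deployments and the setup script,
and runUpgradeJobs below is a placeholder name, not an actual helper
from this series:

package kubernetes

import (
	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
	"github.com/uyuni-project/uyuni-tools/shared/utils"
)

// reconcileSketch mirrors the overall flow of Reconcile() without its
// full error handling and feature coverage. The fqdn parameter is used
// by the real code for the SSL certificates and ingress definitions.
func reconcileSketch(flags *KubernetesServerFlags, fqdn string) error {
	namespace := flags.Helm.Uyuni.Namespace
	if err := CreateNamespace(namespace); err != nil {
		return err
	}

	serverImage, err := utils.ComputeImage(flags.Image.Registry, utils.DefaultTag, flags.Image)
	if err != nil {
		return err
	}

	// Upgrade or migration: a server is already deployed, scale it down
	// and run the DB upgrade / finalize / post-upgrade jobs first.
	if getRunningServerImage(namespace) != "" {
		if err := kubernetes.ReplicasTo(namespace, kubernetes.ServerApp, 0); err != nil {
			return err
		}
		if err := runUpgradeJobs(namespace, serverImage); err != nil {
			return err
		}
	}

	// Install and upgrade share the rest: (re)create the deployment and
	// wait for it to come up.
	if err := CreateServerDeployment(
		namespace, serverImage, flags.Image.PullPolicy, flags.Installation.TZ,
		flags.Installation.Debug.Java, flags.Volumes.Mirror,
		"", // pull secret, omitted in this sketch
	); err != nil {
		return err
	}
	return kubernetes.WaitForDeployments(namespace, ServerDeployName)
}

// runUpgradeJobs is a placeholder for the DB upgrade, finalize and
// post-upgrade job sequence; it is not part of this patch.
func runUpgradeJobs(namespace string, serverImage string) error { return nil }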
--- mgradm/cmd/inspect/kubernetes.go | 15 +- mgradm/cmd/install/kubernetes/kubernetes.go | 11 + .../cmd/install/kubernetes/kubernetes_test.go | 2 + mgradm/cmd/install/kubernetes/utils.go | 116 ------ mgradm/cmd/migrate/kubernetes/migrationJob.go | 14 + mgradm/cmd/migrate/kubernetes/ssl.go | 51 --- mgradm/cmd/migrate/kubernetes/utils.go | 176 ++------- mgradm/cmd/upgrade/kubernetes/utils.go | 4 +- mgradm/shared/kubernetes/certificates.go | 64 ++-- mgradm/shared/kubernetes/certificates_test.go | 46 +++ mgradm/shared/kubernetes/deployment.go | 12 + mgradm/shared/kubernetes/install.go | 269 -------------- mgradm/shared/kubernetes/k3s.go | 213 ----------- mgradm/shared/kubernetes/node.go | 30 ++ mgradm/shared/kubernetes/ports.go | 22 ++ mgradm/shared/kubernetes/reconcile.go | 339 ++++++++++++++++++ mgradm/shared/kubernetes/traefik.go | 2 +- .../templates/mgrSetupScriptTemplate.go | 3 + mgradm/shared/utils/cmd_utils.go | 7 +- shared/connection.go | 66 +--- shared/kubernetes/deploy.go | 38 ++ shared/kubernetes/deploy_test.go | 58 +++ shared/kubernetes/inspect.go | 52 +++ shared/kubernetes/k3s.go | 72 ---- shared/kubernetes/kubernetes.go | 7 + shared/kubernetes/pvc.go | 11 + shared/kubernetes/pvc_test.go | 21 ++ shared/kubernetes/utils.go | 214 +++++++---- shared/kubernetes/utils_test.go | 124 +++++++ shared/types/ssl.go | 2 + shared/utils/inspector.go | 16 + shared/utils/volumes.go | 63 +--- uyuni-tools.changes.cbosdo.k8s-refactoring | 1 + 33 files changed, 1047 insertions(+), 1094 deletions(-) delete mode 100644 mgradm/cmd/install/kubernetes/utils.go delete mode 100644 mgradm/cmd/migrate/kubernetes/ssl.go create mode 100644 mgradm/shared/kubernetes/certificates_test.go delete mode 100644 mgradm/shared/kubernetes/install.go delete mode 100644 mgradm/shared/kubernetes/k3s.go create mode 100644 mgradm/shared/kubernetes/node.go create mode 100644 mgradm/shared/kubernetes/ports.go create mode 100644 mgradm/shared/kubernetes/reconcile.go create mode 100644 shared/kubernetes/deploy.go create mode 100644 shared/kubernetes/deploy_test.go create mode 100644 shared/kubernetes/inspect.go create mode 100644 shared/kubernetes/utils_test.go create mode 100644 uyuni-tools.changes.cbosdo.k8s-refactoring diff --git a/mgradm/cmd/inspect/kubernetes.go b/mgradm/cmd/inspect/kubernetes.go index 89d7f35c8..32efdaeea 100644 --- a/mgradm/cmd/inspect/kubernetes.go +++ b/mgradm/cmd/inspect/kubernetes.go @@ -14,7 +14,7 @@ import ( "github.com/spf13/cobra" "github.com/uyuni-project/uyuni-tools/shared" - shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes" + "github.com/uyuni-project/uyuni-tools/shared/kubernetes" . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" @@ -31,11 +31,11 @@ func kuberneteInspect( return utils.Errorf(err, L("failed to determine image")) } - cnx := shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) + cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter) if len(serverImage) <= 0 { log.Debug().Msg("Use deployed image") - serverImage, err = shared_kubernetes.GetRunningImage("uyuni") + serverImage, err = kubernetes.GetRunningImage("uyuni") if err != nil { return errors.New(L("failed to find the image of the currently running server container: %s")) } @@ -45,7 +45,14 @@ func kuberneteInspect( if err != nil { return utils.Errorf(err, L("failed retrieving namespace")) } - inspectResult, err := shared_kubernetes.InspectKubernetes(namespace, serverImage, flags.Image.PullPolicy) + + // Get the SCC credentials secret if existing + pullSecret, err := kubernetes.GetSCCSecret(namespace, &types.SCCCredentials{}, kubernetes.ServerApp) + if err != nil { + return err + } + + inspectResult, err := kubernetes.InspectServer(namespace, serverImage, flags.Image.PullPolicy, pullSecret) if err != nil { return utils.Errorf(err, L("inspect command failed")) } diff --git a/mgradm/cmd/install/kubernetes/kubernetes.go b/mgradm/cmd/install/kubernetes/kubernetes.go index 34ce80810..dad793305 100644 --- a/mgradm/cmd/install/kubernetes/kubernetes.go +++ b/mgradm/cmd/install/kubernetes/kubernetes.go @@ -44,6 +44,7 @@ NOTE: installing on a remote cluster is not supported yet! shared.AddInstallFlags(cmd) cmd_utils.AddHelmInstallFlag(cmd) + cmd_utils.AddVolumesFlags(cmd) return cmd } @@ -51,3 +52,13 @@ NOTE: installing on a remote cluster is not supported yet! func NewCommand(globalFlags *types.GlobalFlags) *cobra.Command { return newCmd(globalFlags, installForKubernetes) } + +func installForKubernetes( + _ *types.GlobalFlags, + flags *kubernetes.KubernetesServerFlags, + cmd *cobra.Command, + args []string, +) error { + flags.Installation.CheckParameters(cmd, "kubectl") + return kubernetes.Reconcile(flags, args[0]) +} diff --git a/mgradm/cmd/install/kubernetes/kubernetes_test.go b/mgradm/cmd/install/kubernetes/kubernetes_test.go index 69fdee454..097d5fa2b 100644 --- a/mgradm/cmd/install/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/install/kubernetes/kubernetes_test.go @@ -19,6 +19,7 @@ import ( func TestParamsParsing(t *testing.T) { args := flagstests.InstallFlagsTestArgs() args = append(args, flagstests.ServerHelmFlagsTestArgs...) + args = append(args, flagstests.VolumesFlagsTestExpected...) 
args = append(args, "srv.fq.dn") // Test function asserting that the args are properly parsed @@ -27,6 +28,7 @@ func TestParamsParsing(t *testing.T) { ) error { flagstests.AssertInstallFlags(t, &flags.ServerFlags) flagstests.AssertServerHelmFlags(t, &flags.Helm) + flagstests.AssertVolumesFlags(t, &flags.Volumes) testutils.AssertEquals(t, "Wrong FQDN", "srv.fq.dn", args[0]) return nil } diff --git a/mgradm/cmd/install/kubernetes/utils.go b/mgradm/cmd/install/kubernetes/utils.go deleted file mode 100644 index 52dba03e7..000000000 --- a/mgradm/cmd/install/kubernetes/utils.go +++ /dev/null @@ -1,116 +0,0 @@ -// SPDX-FileCopyrightText: 2024 SUSE LLC -// -// SPDX-License-Identifier: Apache-2.0 - -//go:build !nok8s - -package kubernetes - -import ( - "fmt" - "os/exec" - - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" - "github.com/spf13/cobra" - "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" - adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - "github.com/uyuni-project/uyuni-tools/shared" - shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes" - . "github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/types" - shared_utils "github.com/uyuni-project/uyuni-tools/shared/utils" -) - -func installForKubernetes( - _ *types.GlobalFlags, - flags *kubernetes.KubernetesServerFlags, - cmd *cobra.Command, - args []string, -) error { - for _, binary := range []string{"kubectl", "helm"} { - if _, err := exec.LookPath(binary); err != nil { - return fmt.Errorf(L("install %s before running this command"), binary) - } - } - - flags.Installation.CheckParameters(cmd, "kubectl") - cnx := shared.NewConnection("kubectl", "", shared_kubernetes.ServerFilter) - - fqdn := args[0] - - if err := shared_utils.IsValidFQDN(fqdn); err != nil { - return err - } - - helmArgs := []string{"--set", "timezone=" + flags.Installation.TZ} - if flags.Mirror != "" { - // TODO Handle claims for multi-node clusters - helmArgs = append(helmArgs, "--set", "mirror.hostPath="+flags.Mirror) - } - if flags.Installation.Debug.Java { - helmArgs = append(helmArgs, "--set", "exposeJavaDebug=true") - } - - // Check the kubernetes cluster setup - clusterInfos, err := shared_kubernetes.CheckCluster() - if err != nil { - return err - } - - // Deploy the SSL CA or server certificate - if flags.Installation.SSL.UseExisting() { - if err := kubernetes.DeployExistingCertificate(flags.Helm.Uyuni.Namespace, &flags.Installation.SSL); err != nil { - return err - } - } else { - sslArgs, err := kubernetes.DeployGeneratedCa( - &flags.Helm, &flags.Installation.SSL, clusterInfos.GetKubeconfig(), fqdn, flags.Image.PullPolicy, - ) - - if err != nil { - return shared_utils.Errorf(err, L("cannot deploy certificate")) - } - helmArgs = append(helmArgs, sslArgs...) 
- } - - // Create a secret using SCC credentials if any are provided - helmArgs, err = shared_kubernetes.AddSCCSecret( - helmArgs, flags.Helm.Uyuni.Namespace, &flags.Installation.SCC, shared_kubernetes.ServerApp, - ) - if err != nil { - return err - } - - // Deploy Uyuni and wait for it to be up - if err := kubernetes.Deploy( - cnx, flags.Image.Registry, &flags.Image, &flags.HubXmlrpc, &flags.Helm, - clusterInfos, fqdn, flags.Installation.Debug.Java, false, helmArgs..., - ); err != nil { - return shared_utils.Errorf(err, L("cannot deploy uyuni")) - } - - // Create setup script + env variables and copy it to the container - envs := map[string]string{ - "NO_SSL": "Y", - } - - if err := adm_utils.RunSetup(cnx, &flags.ServerFlags, args[0], envs); err != nil { - namespace, err := cnx.GetNamespace("") - if err != nil { - return shared_utils.Errorf(err, L("failed to stop service")) - } - if stopErr := shared_kubernetes.Stop(namespace, shared_kubernetes.ServerApp); stopErr != nil { - log.Error().Err(stopErr).Msg(L("failed to stop service")) - } - return err - } - - // The CA needs to be added to the database for Kickstart use. - err = adm_utils.ExecCommand(zerolog.DebugLevel, cnx, - "/usr/bin/rhn-ssl-dbstore", "--ca-cert=/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT") - if err != nil { - return shared_utils.Errorf(err, L("error storing the SSL CA certificate in database")) - } - return nil -} diff --git a/mgradm/cmd/migrate/kubernetes/migrationJob.go b/mgradm/cmd/migrate/kubernetes/migrationJob.go index 36c984f44..e445c2fe6 100644 --- a/mgradm/cmd/migrate/kubernetes/migrationJob.go +++ b/mgradm/cmd/migrate/kubernetes/migrationJob.go @@ -102,5 +102,19 @@ func getMigrationJob( job.Spec.Template.Spec.Containers[0].VolumeMounts = volumeMounts job.Spec.Template.Spec.Volumes = volumes + initScript := `cp -a /etc/systemd/system/multi-user.target.wants/. /mnt/etc-systemd-multi` + + job.Spec.Template.Spec.InitContainers = []core.Container{ + { + Name: "init-volumes", + Image: image, + ImagePullPolicy: kubernetes.GetPullPolicy(pullPolicy), + Command: []string{"sh", "-c", initScript}, + VolumeMounts: []core.VolumeMount{ + {Name: "etc-systemd-multi", MountPath: "/mnt/etc-systemd-multi"}, + }, + }, + } + return job, nil } diff --git a/mgradm/cmd/migrate/kubernetes/ssl.go b/mgradm/cmd/migrate/kubernetes/ssl.go deleted file mode 100644 index 9ff9bcdd1..000000000 --- a/mgradm/cmd/migrate/kubernetes/ssl.go +++ /dev/null @@ -1,51 +0,0 @@ -// SPDX-FileCopyrightText: 2024 SUSE LLC -// -// SPDX-License-Identifier: Apache-2.0 - -//go:build !nok8s - -package kubernetes - -import ( - "os" - "path" - - "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" - adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/types" - "github.com/uyuni-project/uyuni-tools/shared/utils" -) - -func installExistingCertificate(namespace string, extractedData *MigrationData) error { - // Store the certificates and key to file to load them - tmpDir, cleaner, err := utils.TempDir() - if err != nil { - return err - } - defer cleaner() - - caCrtPath := path.Join(tmpDir, "ca.crt") - if err := os.WriteFile(caCrtPath, []byte(extractedData.CaCert), 0700); err != nil { - return utils.Errorf(err, L("failed to create temporary ca.crt file")) - } - - srvCrtPath := path.Join(tmpDir, "srv.crt") - if err := os.WriteFile(srvCrtPath, []byte(extractedData.ServerCert), 0700); err != nil { - return utils.Errorf(err, L("failed to create temporary srv.crt file")) - } - - srvKeyPath := path.Join(tmpDir, "srv.key") - if err := os.WriteFile(srvKeyPath, []byte(extractedData.ServerKey), 0700); err != nil { - return utils.Errorf(err, L("failed to create temporary srv.key file")) - } - - sslFlags := adm_utils.InstallSSLFlags{ - Ca: types.CaChain{Root: caCrtPath}, - Server: types.SSLPair{ - Key: srvKeyPath, - Cert: srvCrtPath, - }, - } - return kubernetes.DeployExistingCertificate(namespace, &sslFlags) -} diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go index 12e7a6f7e..b2b1ae22e 100644 --- a/mgradm/cmd/migrate/kubernetes/utils.go +++ b/mgradm/cmd/migrate/kubernetes/utils.go @@ -7,14 +7,13 @@ package kubernetes import ( - "encoding/base64" - "fmt" + "os" + "path" "github.com/spf13/cobra" "github.com/uyuni-project/uyuni-tools/mgradm/shared/kubernetes" shared_kubernetes "github.com/uyuni-project/uyuni-tools/shared/kubernetes" . "github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/ssl" "github.com/uyuni-project/uyuni-tools/shared/types" "github.com/uyuni-project/uyuni-tools/shared/utils" ) @@ -28,10 +27,6 @@ func migrateToKubernetes( args []string, ) error { namespace := flags.Helm.Uyuni.Namespace - // Create the namespace if not present - if err := kubernetes.CreateNamespace(namespace); err != nil { - return err - } // Create the namespace if not present if err := kubernetes.CreateNamespace(namespace); err != nil { @@ -99,166 +94,57 @@ func migrateToKubernetes( return err } - oldPgVersion := extractedData.Data.CurrentPgVersion - newPgVersion := extractedData.Data.ImagePgVersion - - // Run the DB Migration job if needed - if oldPgVersion < newPgVersion { - jobName, err := kubernetes.StartDBUpgradeJob( - namespace, flags.Image.Registry, flags.Image, flags.DBUpgradeImage, pullSecret, - oldPgVersion, newPgVersion, - ) - if err != nil { - return err - } - - // Wait for ever for the job to finish: the duration of this job depends on the amount of data to upgrade - if err := shared_kubernetes.WaitForJob(namespace, jobName, -1); err != nil { - return err - } - } else if oldPgVersion > newPgVersion { - return fmt.Errorf( - L("downgrading database from PostgreSQL %[1]d to %[2]d is not supported"), oldPgVersion, newPgVersion) - } - - // Run the DB Finalization job - schemaUpdateRequired := oldPgVersion != newPgVersion - jobName, err = kubernetes.StartDBFinalizeJob( - namespace, serverImage, flags.Image.PullPolicy, pullSecret, schemaUpdateRequired, true, - ) - if err != nil { - return err - } - - // Wait for ever for the job to finish: the duration of this job depends on the amount of data to reindex - if err := shared_kubernetes.WaitForJob(namespace, jobName, -1); err != nil 
{ - return err - } - - // Run the Post Upgrade job - jobName, err = kubernetes.StartPostUpgradeJob(namespace, serverImage, flags.Image.PullPolicy, pullSecret) - if err != nil { - return err - } - - if err := shared_kubernetes.WaitForJob(namespace, jobName, 60); err != nil { - return err + flags.Installation.TZ = extractedData.Data.Timezone + flags.Installation.Debug.Java = extractedData.Data.Debug + if extractedData.Data.HasHubXmlrpcAPI { + flags.HubXmlrpc.Replicas = 1 + flags.HubXmlrpc.IsChanged = true } + flags.Installation.DB.User = extractedData.Data.DBUser + flags.Installation.DB.Password = extractedData.Data.DBPassword + // TODO Are those two really needed in migration? + flags.Installation.DB.Name = extractedData.Data.DBName + flags.Installation.DB.Port = extractedData.Data.DBPort - // Extract some data from the cluster to guess how to configure Uyuni. - clusterInfos, err := shared_kubernetes.CheckCluster() + sslDir, cleaner, err := utils.TempDir() if err != nil { return err } + defer cleaner() - // Install the traefik / nginx config on the node - // This will never be done in an operator. - needsHub := flags.HubXmlrpc.Replicas > 0 - if err := kubernetes.DeployNodeConfig(namespace, clusterInfos, needsHub, extractedData.Data.Debug); err != nil { + // Extract the SSL data as files and pass them as arguments to share code with installation. + if err := writeToFile( + extractedData.CaCert, path.Join(sslDir, "ca.crt"), &flags.Installation.SSL.Ca.Root, + ); err != nil { return err } - // Deploy the SSL CA and server certificates - var caIssuer string + // The CA key shouldn't be stored as a temporary file. if extractedData.CaKey != "" { - // cert-manager is not required for 3rd party certificates, only if we have the CA key. - // Note that in an operator we won't be able to install cert-manager and just wait for it to be installed. - kubeconfig := clusterInfos.GetKubeconfig() - - if err := kubernetes.InstallCertManager(&flags.Helm, kubeconfig, flags.Image.PullPolicy); err != nil { - return utils.Errorf(err, L("cannot install cert manager")) - } - - // Convert CA to RSA to use in a Kubernetes TLS secret. - // In an operator we would have to fail now if there is no SSL password as we cannot prompt it. - ca := types.SSLPair{ - Key: base64.StdEncoding.EncodeToString( - ssl.GetRsaKey(extractedData.CaKey, flags.Installation.SSL.Password), - ), - Cert: base64.StdEncoding.EncodeToString(ssl.StripTextFromCertificate(extractedData.CaCert)), - } - - // Install the cert-manager issuers - if _, err := kubernetes.DeployReusedCa(namespace, &ca); err != nil { - return err - } - caIssuer = shared_kubernetes.CaIssuerName - } else { - // Most likely a 3rd party certificate: cert-manager is not needed in this case - if err := installExistingCertificate(namespace, extractedData); err != nil { - return err - } + flags.Installation.SSL.Ca.Key = extractedData.CaKey } - // Create the Ingress routes before the deployments as those are triggering - // the creation of the uyuni-cert secret from cert-manager. 
- if err := kubernetes.CreateIngress(namespace, fqdn, caIssuer, clusterInfos.Ingress); err != nil { - return err - } - - // Wait for uyuni-cert secret to be ready - shared_kubernetes.WaitForSecret(namespace, kubernetes.CertSecretName) - - deploymentsStarting := []string{kubernetes.ServerDeployName} - // Start the server - if err := kubernetes.CreateServerDeployment( - namespace, serverImage, flags.Image.PullPolicy, extractedData.Data.Timezone, extractedData.Data.Debug, - flags.Volumes.Mirror, pullSecret, + if err := writeToFile( + extractedData.ServerCert, path.Join(sslDir, "srv.crt"), &flags.Installation.SSL.Server.Cert, ); err != nil { return err } - // Create the services - if err := kubernetes.CreateServices(namespace, extractedData.Data.Debug); err != nil { - return err - } - - if clusterInfos.Ingress == "traefik" { - // Create the Traefik routes - if err := kubernetes.CreateTraefikRoutes(namespace, needsHub, extractedData.Data.Debug); err != nil { - return err - } - } - - // Store the extracted DB credentials in a secret. - if err := kubernetes.CreateDBSecret( - namespace, kubernetes.DBSecret, extractedData.Data.DBUser, extractedData.Data.DBPassword, + if err := writeToFile( + extractedData.ServerKey, path.Join(sslDir, "srv.key"), &flags.Installation.SSL.Server.Key, ); err != nil { return err } - // Start the Coco Deployments if requested. - if flags.Coco.Replicas > 0 { - cocoImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.Coco.Image) - if err != nil { - return err - } - if err := kubernetes.StartCocoDeployment( - namespace, cocoImage, flags.Image.PullPolicy, pullSecret, flags.Coco.Replicas, - extractedData.Data.DBPort, extractedData.Data.DBName, - ); err != nil { - return err - } - deploymentsStarting = append(deploymentsStarting, kubernetes.CocoDeployName) - } + return kubernetes.Reconcile(flags, fqdn) +} - // In an operator mind, the user would just change the custom resource to enable the feature. 
- if extractedData.Data.HasHubXmlrpcAPI { - // Install Hub API deployment, service - hubAPIImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.HubXmlrpc.Image) - if err != nil { - return err - } - if err := kubernetes.InstallHubAPI(namespace, hubAPIImage, flags.Image.PullPolicy, pullSecret); err != nil { - return err +func writeToFile(content string, file string, flag *string) error { + if content != "" { + if err := os.WriteFile(file, []byte(content), 0600); err != nil { + return utils.Errorf(err, L("failed to write certificate to %s"), file) } - deploymentsStarting = append(deploymentsStarting, kubernetes.HubAPIDeployName) - } - - // Wait for all the deployments to be ready - if err := shared_kubernetes.WaitForDeployments(namespace, deploymentsStarting...); err != nil { - return err + *flag = file } - return nil } diff --git a/mgradm/cmd/upgrade/kubernetes/utils.go b/mgradm/cmd/upgrade/kubernetes/utils.go index 81818628b..2fa1986f9 100644 --- a/mgradm/cmd/upgrade/kubernetes/utils.go +++ b/mgradm/cmd/upgrade/kubernetes/utils.go @@ -18,7 +18,5 @@ func upgradeKubernetes( cmd *cobra.Command, args []string, ) error { - return kubernetes.Upgrade( - globalFlags, &flags.ServerFlags.Image, &flags.DBUpgradeImage, &flags.HubXmlrpc.Image, flags.Helm, cmd, args, - ) + return kubernetes.Reconcile(flags, "") } diff --git a/mgradm/shared/kubernetes/certificates.go b/mgradm/shared/kubernetes/certificates.go index 28e925df0..369c3fda0 100644 --- a/mgradm/shared/kubernetes/certificates.go +++ b/mgradm/shared/kubernetes/certificates.go @@ -12,6 +12,7 @@ import ( "fmt" "os" "path/filepath" + "strings" "time" "github.com/rs/zerolog" @@ -70,13 +71,11 @@ func DeployExistingCertificate(namespace string, sslFlags *cmd_utils.InstallSSLF } // DeployReusedCa deploys an existing SSL CA using an already installed cert-manager. -func DeployReusedCa(namespace string, ca *types.SSLPair) ([]string, error) { - helmArgs := []string{} - +func DeployReusedCa(namespace string, ca *types.SSLPair) error { log.Info().Msg(L("Creating cert-manager issuer for existing CA")) tempDir, cleaner, err := utils.TempDir() if err != nil { - return []string{}, err + return err } defer cleaner() @@ -89,7 +88,7 @@ func DeployReusedCa(namespace string, ca *types.SSLPair) ([]string, error) { } if err = utils.WriteTemplateToFile(issuerData, issuerPath, 0500, true); err != nil { - return []string{}, utils.Errorf(err, L("failed to generate issuer definition")) + return utils.Errorf(err, L("failed to generate issuer definition")) } err = utils.RunCmd("kubectl", "apply", "-f", issuerPath) @@ -97,46 +96,26 @@ func DeployReusedCa(namespace string, ca *types.SSLPair) ([]string, error) { log.Fatal().Err(err).Msg(L("Failed to create issuer")) } - // Wait for issuer to be ready - if err := waitForIssuer(namespace, kubernetes.CaIssuerName); err != nil { - return nil, err - } - helmArgs = append(helmArgs, "--set-json", ingressCertManagerAnnotation) - - // Copy the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - if err := createCaConfig(namespace, []byte(ca.Cert)); err != nil { - return nil, err - } - - return helmArgs, nil + return nil } // DeployGenerateCa deploys a new SSL CA using cert-manager. 
func DeployGeneratedCa( - helmFlags *cmd_utils.HelmFlags, + namespace string, sslFlags *cmd_utils.InstallSSLFlags, - kubeconfig string, fqdn string, - imagePullPolicy string, -) ([]string, error) { - helmArgs := []string{} - - // Install cert-manager if needed - if err := InstallCertManager(helmFlags, kubeconfig, imagePullPolicy); err != nil { - return []string{}, utils.Errorf(err, L("cannot install cert manager")) - } - +) error { log.Info().Msg(L("Creating SSL certificate issuer")) tempDir, err := os.MkdirTemp("", "mgradm-*") if err != nil { - return []string{}, utils.Errorf(err, L("failed to create temporary directory")) + return utils.Errorf(err, L("failed to create temporary directory")) } defer os.RemoveAll(tempDir) issuerPath := filepath.Join(tempDir, "issuer.yaml") issuerData := templates.GeneratedCaIssuerTemplateData{ - Namespace: helmFlags.Uyuni.Namespace, + Namespace: namespace, Country: sslFlags.Country, State: sslFlags.State, City: sslFlags.City, @@ -147,26 +126,15 @@ func DeployGeneratedCa( } if err = utils.WriteTemplateToFile(issuerData, issuerPath, 0500, true); err != nil { - return []string{}, utils.Errorf(err, L("failed to generate issuer definition")) + return utils.Errorf(err, L("failed to generate issuer definition")) } err = utils.RunCmd("kubectl", "apply", "-f", issuerPath) if err != nil { - log.Fatal().Err(err).Msg(L("Failed to create issuer")) - } - - // Wait for issuer to be ready - if err := waitForIssuer(helmFlags.Uyuni.Namespace, "uyuni-ca-issuer"); err != nil { - return nil, err - } - helmArgs = append(helmArgs, "--set-json", ingressCertManagerAnnotation) - - // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - if err := extractCaCertToConfig(helmFlags.Uyuni.Namespace); err != nil { - return nil, err + return utils.Errorf(err, L("Failed to create issuer")) } - return helmArgs, nil + return nil } // Wait for issuer to be ready. @@ -274,3 +242,11 @@ func createCaConfig(namespace string, ca []byte) error { } return kubernetes.Apply([]runtime.Object{&configMap}, L("failed to create the SSH migration ConfigMap")) } + +// HasIssuer returns true if the issuer is defined. +// +// False will be returned in case of errors or if the issuer resource doesn't exist on the cluster. 
+func HasIssuer(namespace string, name string) bool {
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "issuer", "-n", namespace, name, "-o", "name")
+	return err == nil && strings.TrimSpace(string(out)) != ""
+}
diff --git a/mgradm/shared/kubernetes/certificates_test.go b/mgradm/shared/kubernetes/certificates_test.go
new file mode 100644
index 000000000..772d57eda
--- /dev/null
+++ b/mgradm/shared/kubernetes/certificates_test.go
@@ -0,0 +1,46 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/uyuni-project/uyuni-tools/shared/testutils"
+)
+
+func TestHasIssuer(t *testing.T) {
+	type testType struct {
+		out      string
+		err      error
+		expected bool
+	}
+
+	data := []testType{
+		{
+			out:      "issuer.cert-manager.io/someissuer\n",
+			err:      nil,
+			expected: true,
+		},
+		{
+			out:      "any error\n",
+			err:      errors.New("Any error"),
+			expected: false,
+		},
+	}
+
+	for i, test := range data {
+		runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) {
+			return []byte(test.out), test.err
+		}
+		testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i+1), test.expected,
+			HasIssuer("somens", "someissuer"),
+		)
+	}
+}
diff --git a/mgradm/shared/kubernetes/deployment.go b/mgradm/shared/kubernetes/deployment.go
index 83e6b3f9a..a4bd4af8b 100644
--- a/mgradm/shared/kubernetes/deployment.go
+++ b/mgradm/shared/kubernetes/deployment.go
@@ -309,3 +309,15 @@ func getRunningServerImage(namespace string) string {
 	}
 	return strings.TrimSpace(string(out))
 }
+
+// neverSetup checks whether the server container has never been set up.
+func neverSetup(namespace string, image string, pullPolicy string, pullSecret string) bool {
+	out, err := kubernetes.RunPodLogs(namespace, "ran-setup-check", image, pullPolicy, pullSecret,
+		[]types.VolumeMount{utils.RootVolumeMount},
+		"ls", "-1a", "/root/",
+	)
+	if err != nil {
+		return false
+	}
+	return !strings.Contains(string(out), ".MANAGER_SETUP_COMPLETE")
+}
diff --git a/mgradm/shared/kubernetes/install.go b/mgradm/shared/kubernetes/install.go
deleted file mode 100644
index 144214335..000000000
--- a/mgradm/shared/kubernetes/install.go
+++ /dev/null
@@ -1,269 +0,0 @@
-// SPDX-FileCopyrightText: 2024 SUSE LLC
-//
-// SPDX-License-Identifier: Apache-2.0
-
-//go:build !nok8s
-
-package kubernetes
-
-import (
-	"errors"
-	"fmt"
-	"os/exec"
-
-	"github.com/rs/zerolog/log"
-	"github.com/spf13/cobra"
-	cmd_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
-	"github.com/uyuni-project/uyuni-tools/shared"
-	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
-	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
-	"github.com/uyuni-project/uyuni-tools/shared/types"
-	"github.com/uyuni-project/uyuni-tools/shared/utils"
-)
-
-// HelmAppName is the Helm application name.
-const HelmAppName = "uyuni"
-
-// DeployNodeConfig deploy configuration files on the node.
-func DeployNodeConfig( - namespace string, - clusterInfos *kubernetes.ClusterInfos, - needsHub bool, - debug bool, -) error { - // If installing on k3s, install the traefik helm config in manifests - isK3s := clusterInfos.IsK3s() - IsRke2 := clusterInfos.IsRke2() - ports := GetPortLists(needsHub, debug) - if isK3s { - return kubernetes.InstallK3sTraefikConfig(ports) - } else if IsRke2 { - return kubernetes.InstallRke2NginxConfig(ports, namespace) - } - return nil -} - -// Deploy execute a deploy of a given image and helm to a cluster. -func Deploy( - cnx *shared.Connection, - registry string, - imageFlags *types.ImageFlags, - hubXmlrpcFlags *cmd_utils.HubXmlrpcFlags, - helmFlags *cmd_utils.HelmFlags, - clusterInfos *kubernetes.ClusterInfos, - fqdn string, - debug bool, - prepare bool, - helmArgs ...string, -) error { - // If installing on k3s, install the traefik helm config in manifests - if !prepare { - if err := DeployNodeConfig( - helmFlags.Uyuni.Namespace, clusterInfos, hubXmlrpcFlags.Replicas > 0, debug, - ); err != nil { - return err - } - } - - serverImage, err := utils.ComputeImage(registry, utils.DefaultTag, *imageFlags) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - - hubXmlrpcImage, err := utils.ComputeImage(registry, imageFlags.Tag, hubXmlrpcFlags.Image) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - - // Install the uyuni server helm chart - if err := UyuniUpgrade( - serverImage, imageFlags.PullPolicy, hubXmlrpcFlags.Replicas, hubXmlrpcImage, helmFlags, - clusterInfos.GetKubeconfig(), fqdn, clusterInfos.Ingress, helmArgs..., - ); err != nil { - return utils.Errorf(err, L("cannot upgrade")) - } - - // Wait for the pod to be started - err = kubernetes.WaitForDeployments(helmFlags.Uyuni.Namespace, HelmAppName) - if err != nil { - return utils.Errorf(err, L("cannot deploy")) - } - return cnx.WaitForServer() -} - -// UyuniUpgrade runs an helm upgrade using images and helm configuration as parameters. -func UyuniUpgrade( - serverImage string, - pullPolicy string, - hubXmlrpcReplicas int, - hubXmlrpcImage string, - helmFlags *cmd_utils.HelmFlags, - kubeconfig string, - fqdn string, - ingress string, - helmArgs ...string, -) error { - log.Info().Msg(L("Installing Uyuni")) - - // The guessed ingress is passed before the user's value to let the user override it in case we got it wrong. - helmParams := []string{ - "--set", "ingress=" + ingress, - } - - extraValues := helmFlags.Uyuni.Values - if extraValues != "" { - helmParams = append(helmParams, "-f", extraValues) - } - - // The values computed from the command line need to be last to override what could be in the extras - helmParams = append(helmParams, - "--set", "images.server="+serverImage, - "--set", "pullPolicy="+string(kubernetes.GetPullPolicy(pullPolicy)), - "--set", "fqdn="+fqdn, - ) - - if hubXmlrpcReplicas > 0 { - log.Info().Msg(L("Enabling Hub XMLRPC API container.")) - helmParams = append(helmParams, - "--set", fmt.Sprintf("hub.api.replicas=%v", hubXmlrpcReplicas), - "--set", "images.hub_xmlrpc="+hubXmlrpcImage) - } - helmParams = append(helmParams, helmArgs...) - - namespace := helmFlags.Uyuni.Namespace - chart := helmFlags.Uyuni.Chart - version := helmFlags.Uyuni.Version - return kubernetes.HelmUpgrade(kubeconfig, namespace, true, "", HelmAppName, chart, version, helmParams...) -} - -// Upgrade will upgrade a server in a kubernetes cluster. 
-func Upgrade( - _ *types.GlobalFlags, - image *types.ImageFlags, - upgradeImage *types.ImageFlags, - hubXmlrpcImage *types.ImageFlags, - helm cmd_utils.HelmFlags, - _ *cobra.Command, - _ []string, -) error { - for _, binary := range []string{"kubectl", "helm"} { - if _, err := exec.LookPath(binary); err != nil { - return fmt.Errorf(L("install %s before running this command"), binary) - } - } - - cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter) - namespace, err := cnx.GetNamespace("") - if err != nil { - return utils.Errorf(err, L("failed retrieving namespace")) - } - - origHubXmlrpcImage, err := kubernetes.GetRunningImage("hub-xmlrpc-api") - if err != nil { - return utils.Errorf(err, L("failed to find Hub XML-RPC API container")) - } - - serverImage, err := utils.ComputeImage(image.Registry, utils.DefaultTag, *image) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - - inspectedValues, err := kubernetes.InspectKubernetes(namespace, serverImage, image.PullPolicy) - if err != nil { - return utils.Errorf(err, L("cannot inspect kubernetes values")) - } - - var runningData *utils.ServerInspectData - if runningImage := getRunningServerImage(namespace); runningImage != "" { - runningData, err = kubernetes.InspectKubernetes(namespace, runningImage, "Never") - if err != nil { - return err - } - } - - err = cmd_utils.SanityCheck(runningData, inspectedValues, serverImage) - if err != nil { - return err - } - - fqdn := inspectedValues.Fqdn - if fqdn == "" { - return errors.New(L("inspect function did non return fqdn value")) - } - - clusterInfos, err := kubernetes.CheckCluster() - if err != nil { - return err - } - kubeconfig := clusterInfos.GetKubeconfig() - - // this is needed because folder with script needs to be mounted - // check the node before scaling down - nodeName, err := kubernetes.GetNode(namespace, kubernetes.ServerFilter) - if err != nil { - return utils.Errorf(err, L("cannot find node running uyuni")) - } - - err = kubernetes.ReplicasTo(namespace, kubernetes.ServerApp, 0) - if err != nil { - return utils.Errorf(err, L("cannot set replica to 0")) - } - - if inspectedValues.ImagePgVersion > inspectedValues.CurrentPgVersion { - log.Info().Msgf(L("Previous PostgreSQL is %[1]s, new one is %[2]s. 
Performing a DB version upgrade…"), - inspectedValues.CurrentPgVersion, inspectedValues.ImagePgVersion) - - if err := RunPgsqlVersionUpgrade(image.Registry, *image, *upgradeImage, nodeName, namespace, - inspectedValues.CurrentPgVersion, inspectedValues.ImagePgVersion, - ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL version upgrade script")) - } - } else if inspectedValues.ImagePgVersion == inspectedValues.CurrentPgVersion { - log.Info().Msgf(L("Upgrading to %s without changing PostgreSQL version"), inspectedValues.UyuniRelease) - } else { - return fmt.Errorf(L("trying to downgrade PostgreSQL from %[1]s to %[2]s"), - inspectedValues.CurrentPgVersion, inspectedValues.ImagePgVersion) - } - - schemaUpdateRequired := inspectedValues.CurrentPgVersion != inspectedValues.ImagePgVersion - if err := RunPgsqlFinalizeScript( - serverImage, image.PullPolicy, namespace, nodeName, schemaUpdateRequired, false, - ); err != nil { - return utils.Errorf(err, L("cannot run PostgreSQL finalize script")) - } - - if err := RunPostUpgradeScript(serverImage, image.PullPolicy, namespace, nodeName); err != nil { - return utils.Errorf(err, L("cannot run post upgrade script")) - } - - helmArgs := []string{} - - // Get the registry secret name if any - pullSecret, err := kubernetes.GetDeploymentImagePullSecret(namespace, kubernetes.ServerFilter) - if err != nil { - return err - } - if pullSecret != "" { - helmArgs = append(helmArgs, "--set", "registrySecret="+pullSecret) - } - - hubXmlrpcImageName, err := utils.ComputeImage(image.Registry, image.Tag, *hubXmlrpcImage) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - - hubXmlrpcReplicas := 0 - if origHubXmlrpcImage != "" { - hubXmlrpcReplicas = 1 - } - - err = UyuniUpgrade( - serverImage, image.PullPolicy, hubXmlrpcReplicas, hubXmlrpcImageName, &helm, kubeconfig, fqdn, - clusterInfos.Ingress, helmArgs..., - ) - if err != nil { - return utils.Errorf(err, L("cannot upgrade to image %s"), serverImage) - } - - return kubernetes.WaitForDeployments(namespace, "uyuni") -} diff --git a/mgradm/shared/kubernetes/k3s.go b/mgradm/shared/kubernetes/k3s.go deleted file mode 100644 index ebdd507b8..000000000 --- a/mgradm/shared/kubernetes/k3s.go +++ /dev/null @@ -1,213 +0,0 @@ -// SPDX-FileCopyrightText: 2024 SUSE LLC -// -// SPDX-License-Identifier: Apache-2.0 - -package kubernetes - -import ( - "fmt" - - "github.com/rs/zerolog/log" - adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" - "github.com/uyuni-project/uyuni-tools/shared/kubernetes" - . "github.com/uyuni-project/uyuni-tools/shared/l10n" - "github.com/uyuni-project/uyuni-tools/shared/types" - "github.com/uyuni-project/uyuni-tools/shared/utils" -) - -// GetPortLists returns compiled lists of tcp and udp ports.. -func GetPortLists(hub bool, debug bool) []types.PortMap { - ports := utils.GetServerPorts(debug) - if hub { - ports = append(ports, utils.HubXmlrpcPorts...) - } - - return ports -} - -// RunPgsqlVersionUpgrade perform a PostgreSQL major upgrade. -func RunPgsqlVersionUpgrade( - registry string, - image types.ImageFlags, - upgradeImage types.ImageFlags, - namespace string, - nodeName string, - oldPgsql string, - newPgsql string, -) error { - scriptDir, cleaner, err := utils.TempDir() - if err != nil { - return err - } - defer cleaner() - if newPgsql > oldPgsql { - log.Info().Msgf( - L("Previous PostgreSQL is %[1]s, new one is %[2]s. 
Performing a DB version upgrade…"), - oldPgsql, newPgsql, - ) - - pgsqlVersionUpgradeContainer := "uyuni-upgrade-pgsql" - - upgradeImageURL := "" - if upgradeImage.Name == "" { - upgradeImageURL, err = utils.ComputeImage( - registry, image.Tag, image, fmt.Sprintf("-migration-%s-%s", oldPgsql, newPgsql), - ) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - } else { - upgradeImageURL, err = utils.ComputeImage(registry, image.Tag, upgradeImage) - if err != nil { - return utils.Errorf(err, L("failed to compute image URL")) - } - } - - log.Info().Msgf(L("Using database upgrade image %s"), upgradeImageURL) - pgsqlVersionUpgradeScriptName, err := adm_utils.GeneratePgsqlVersionUpgradeScript(scriptDir, oldPgsql, newPgsql) - if err != nil { - return utils.Errorf(err, L("cannot generate PostgreSQL database version upgrade script")) - } - - // delete pending pod and then check the node, because in presence of more than a pod GetNode return is wrong - if err := kubernetes.DeletePod(namespace, pgsqlVersionUpgradeContainer, kubernetes.ServerFilter); err != nil { - return utils.Errorf(err, L("cannot delete %s"), pgsqlVersionUpgradeContainer) - } - - // generate deploy data - pgsqlVersioUpgradeDeployData := types.Deployment{ - APIVersion: "v1", - Spec: &types.Spec{ - RestartPolicy: "Never", - NodeName: nodeName, - Containers: []types.Container{ - { - Name: pgsqlVersionUpgradeContainer, - VolumeMounts: append(utils.PgsqlRequiredVolumeMounts, - types.VolumeMount{MountPath: "/var/lib/uyuni-tools", Name: "var-lib-uyuni-tools"}), - }, - }, - Volumes: append(utils.PgsqlRequiredVolumes, - types.Volume{Name: "var-lib-uyuni-tools", HostPath: &types.HostPath{Path: scriptDir, Type: "Directory"}}), - }, - } - - // transform deploy in JSON - overridePgsqlVersioUpgrade, err := kubernetes.GenerateOverrideDeployment(pgsqlVersioUpgradeDeployData) - if err != nil { - return err - } - - err = kubernetes.RunPod( - namespace, pgsqlVersionUpgradeContainer, kubernetes.ServerFilter, upgradeImageURL, image.PullPolicy, - "/var/lib/uyuni-tools/"+pgsqlVersionUpgradeScriptName, overridePgsqlVersioUpgrade, - ) - if err != nil { - return utils.Errorf(err, L("error running container %s"), pgsqlVersionUpgradeContainer) - } - } - return nil -} - -// RunPgsqlFinalizeScript run the script with all the action required to a db after upgrade. 
-func RunPgsqlFinalizeScript( - serverImage string, pullPolicy string, namespace string, nodeName string, schemaUpdateRequired bool, migration bool, -) error { - scriptDir, cleaner, err := utils.TempDir() - if err != nil { - return err - } - defer cleaner() - pgsqlFinalizeContainer := "uyuni-finalize-pgsql" - pgsqlFinalizeScriptName, err := adm_utils.GenerateFinalizePostgresScript( - scriptDir, true, schemaUpdateRequired, true, migration, true, - ) - if err != nil { - return utils.Errorf(err, L("cannot generate PostgreSQL finalization script")) - } - // delete pending pod and then check the node, because in presence of more than a pod GetNode return is wrong - if err := kubernetes.DeletePod(namespace, pgsqlFinalizeContainer, kubernetes.ServerFilter); err != nil { - return utils.Errorf(err, L("cannot delete %s"), pgsqlFinalizeContainer) - } - // generate deploy data - pgsqlFinalizeDeployData := types.Deployment{ - APIVersion: "v1", - Spec: &types.Spec{ - RestartPolicy: "Never", - NodeName: nodeName, - Containers: []types.Container{ - { - Name: pgsqlFinalizeContainer, - VolumeMounts: append(utils.PgsqlRequiredVolumeMounts, - types.VolumeMount{MountPath: "/var/lib/uyuni-tools", Name: "var-lib-uyuni-tools"}), - }, - }, - Volumes: append(utils.PgsqlRequiredVolumes, - types.Volume{Name: "var-lib-uyuni-tools", HostPath: &types.HostPath{Path: scriptDir, Type: "Directory"}}), - }, - } - // transform deploy data in JSON - overridePgsqlFinalize, err := kubernetes.GenerateOverrideDeployment(pgsqlFinalizeDeployData) - if err != nil { - return err - } - err = kubernetes.RunPod( - namespace, pgsqlFinalizeContainer, kubernetes.ServerFilter, serverImage, pullPolicy, - "/var/lib/uyuni-tools/"+pgsqlFinalizeScriptName, overridePgsqlFinalize, - ) - if err != nil { - return utils.Errorf(err, L("error running container %s"), pgsqlFinalizeContainer) - } - return nil -} - -// RunPostUpgradeScript run the script with the changes to apply after the upgrade. 
-func RunPostUpgradeScript(serverImage string, pullPolicy string, namespace string, nodeName string) error {
-	scriptDir, cleaner, err := utils.TempDir()
-	if err != nil {
-		return err
-	}
-	defer cleaner()
-	postUpgradeContainer := "uyuni-post-upgrade"
-	postUpgradeScriptName, err := adm_utils.GeneratePostUpgradeScript(scriptDir)
-	if err != nil {
-		return utils.Errorf(err, L("cannot generate PostgreSQL finalization script"))
-	}
-
-	// delete pending pod and then check the node, because in presence of more than a pod GetNode return is wrong
-	if err := kubernetes.DeletePod(namespace, postUpgradeContainer, kubernetes.ServerFilter); err != nil {
-		return utils.Errorf(err, L("cannot delete %s"), postUpgradeContainer)
-	}
-	// generate deploy data
-	postUpgradeDeployData := types.Deployment{
-		APIVersion: "v1",
-		Spec: &types.Spec{
-			RestartPolicy: "Never",
-			NodeName: nodeName,
-			Containers: []types.Container{
-				{
-					Name: postUpgradeContainer,
-					VolumeMounts: append(utils.PgsqlRequiredVolumeMounts,
-						types.VolumeMount{MountPath: "/var/lib/uyuni-tools", Name: "var-lib-uyuni-tools"}),
-				},
-			},
-			Volumes: append(utils.PgsqlRequiredVolumes,
-				types.Volume{Name: "var-lib-uyuni-tools", HostPath: &types.HostPath{Path: scriptDir, Type: "Directory"}}),
-		},
-	}
-	// transform deploy data in JSON
-	overridePostUpgrade, err := kubernetes.GenerateOverrideDeployment(postUpgradeDeployData)
-	if err != nil {
-		return err
-	}
-
-	err = kubernetes.RunPod(
-		namespace, postUpgradeContainer, kubernetes.ServerFilter, serverImage, pullPolicy,
-		"/var/lib/uyuni-tools/"+postUpgradeScriptName, overridePostUpgrade,
-	)
-	if err != nil {
-		return utils.Errorf(err, L("error running container %s"), postUpgradeContainer)
-	}
-
-	return nil
-}
diff --git a/mgradm/shared/kubernetes/node.go b/mgradm/shared/kubernetes/node.go
new file mode 100644
index 000000000..11b860535
--- /dev/null
+++ b/mgradm/shared/kubernetes/node.go
@@ -0,0 +1,30 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+)
+
+// deployNodeConfig deploys configuration files on the node.
+func deployNodeConfig(
+	namespace string,
+	clusterInfos *kubernetes.ClusterInfos,
+	needsHub bool,
+	debug bool,
+) error {
+	// If installing on k3s, install the traefik helm config in manifests
+	isK3s := clusterInfos.IsK3s()
+	IsRke2 := clusterInfos.IsRke2()
+	ports := getPortList(needsHub, debug)
+	if isK3s {
+		return kubernetes.InstallK3sTraefikConfig(ports)
+	} else if IsRke2 {
+		return kubernetes.InstallRke2NginxConfig(ports, namespace)
+	}
+	return nil
+}
diff --git a/mgradm/shared/kubernetes/ports.go b/mgradm/shared/kubernetes/ports.go
new file mode 100644
index 000000000..d8f0ae6be
--- /dev/null
+++ b/mgradm/shared/kubernetes/ports.go
@@ -0,0 +1,22 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+)
+
+// getPortList returns the compiled list of TCP and UDP ports.
+func getPortList(hub bool, debug bool) []types.PortMap {
+	ports := utils.GetServerPorts(debug)
+	if hub {
+		ports = append(ports, utils.HubXmlrpcPorts...)
+	}
+
+	return ports
+}
diff --git a/mgradm/shared/kubernetes/reconcile.go b/mgradm/shared/kubernetes/reconcile.go
new file mode 100644
index 000000000..35ed69836
--- /dev/null
+++ b/mgradm/shared/kubernetes/reconcile.go
@@ -0,0 +1,339 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	"encoding/base64"
+	"errors"
+	"fmt"
+	"os"
+	"os/exec"
+
+	"github.com/rs/zerolog/log"
+	adm_utils "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils"
+	"github.com/uyuni-project/uyuni-tools/shared"
+	"github.com/uyuni-project/uyuni-tools/shared/kubernetes"
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/ssl"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+)
+
+// Reconcile upgrades, migrates, or installs the server.
+func Reconcile(flags *KubernetesServerFlags, fqdn string) error {
+	if _, err := exec.LookPath("kubectl"); err != nil {
+		return errors.New(L("install kubectl before running this command"))
+	}
+
+	namespace := flags.Helm.Uyuni.Namespace
+	// Create the namespace if not present
+	if err := CreateNamespace(namespace); err != nil {
+		return err
+	}
+
+	serverImage, err := utils.ComputeImage(flags.Image.Registry, utils.DefaultTag, flags.Image)
+	if err != nil {
+		return utils.Errorf(err, L("failed to compute image URL"))
+	}
+
+	cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter)
+
+	// Create a secret using SCC credentials if any are provided
+	pullSecret, err := kubernetes.GetSCCSecret(flags.Helm.Uyuni.Namespace, &flags.Installation.SCC, kubernetes.ServerApp)
+	if err != nil {
+		return err
+	}
+
+	// Do we have an existing deployment to upgrade?
+	// This can be freshly synchronized data from a migration or a running instance to upgrade.
+	hasDeployment := kubernetes.HasDeployment(namespace, kubernetes.ServerFilter)
+	hasDatabase := kubernetes.HasVolume(namespace, "var-pgsql")
+	isMigration := hasDatabase && !hasDeployment
+
+	cocoReplicas := kubernetes.GetReplicas(namespace, CocoDeployName)
+	if cocoReplicas != 0 && !flags.Coco.IsChanged {
+		// Upgrade: detect the number of running coco replicas
+		flags.Coco.Replicas = cocoReplicas
+	}
+
+	var inspectedData utils.ServerInspectData
+	if hasDatabase {
+		// Inspect the image and the existing volumes
+		data, err := kubernetes.InspectServer(namespace, serverImage, flags.Image.PullPolicy, pullSecret)
+		if err != nil {
+			return err
+		}
+		inspectedData = *data
+
+		// Use the inspected DB port and name if not defined in the flags
+		if flags.Installation.DB.Port == 0 && data.DBPort != 0 {
+			flags.Installation.DB.Port = data.DBPort
+		}
+
+		if flags.Installation.DB.Name == "" && data.DBName != "" {
+			flags.Installation.DB.Name = data.DBName
+		}
+
+		// Do we have a running server deploy? Which version is it?
+		// If there is no deployment / image, don't check the uyuni / SUMA upgrades
+		var runningData *utils.ServerInspectData
+		if runningImage := getRunningServerImage(namespace); runningImage != "" {
+			runningData, err = kubernetes.InspectServer(namespace, runningImage, "Never", pullSecret)
+			if err != nil {
+				return err
+			}
+		}
+
+		// Run sanity checks for upgrade
+		if err := adm_utils.SanityCheck(runningData, &inspectedData, serverImage); err != nil {
+			return err
+		}
+
+		// Get the fqdn from the inspected data if possible. Ignore difference with input value for now.
+		fqdn = inspectedData.Fqdn
+
+		if hasDeployment {
+			// Scale down all deployments relying on the DB since it will be brought down during upgrade.
+			if cocoReplicas > 0 {
+				if err := kubernetes.ReplicasTo(namespace, CocoDeployName, 0); err != nil {
+					return utils.Errorf(err, L("cannot set confidential computing containers replicas to 0"))
+				}
+			}
+
+			// Scale down server deployment if present to upgrade the DB
+			if err := kubernetes.ReplicasTo(namespace, ServerDeployName, 0); err != nil {
+				return utils.Errorf(err, L("cannot set server replicas to 0"))
+			}
+		}
+	}
+
+	// Don't check the FQDN too early or we may not have it in case of upgrade.
+	if err := utils.IsValidFQDN(fqdn); err != nil {
+		return err
+	}
+
+	mounts := GetServerMounts()
+	mounts = TuneMounts(mounts, &flags.Volumes)
+
+	if err := kubernetes.CreatePersistentVolumeClaims(namespace, mounts); err != nil {
+		return err
+	}
+
+	if hasDatabase {
+		oldPgVersion := inspectedData.CurrentPgVersion
+		newPgVersion := inspectedData.ImagePgVersion
+
+		// Run the DB Upgrade job if needed
+		if oldPgVersion < newPgVersion {
+			jobName, err := StartDBUpgradeJob(
+				namespace, flags.Image.Registry, flags.Image, flags.DBUpgradeImage, pullSecret,
+				oldPgVersion, newPgVersion,
+			)
+			if err != nil {
+				return err
+			}
+
+			// Wait forever for the job to finish: the duration of this job depends on the amount of data to upgrade
+			if err := kubernetes.WaitForJob(namespace, jobName, -1); err != nil {
+				return err
+			}
+		} else if oldPgVersion > newPgVersion {
+			return fmt.Errorf(
+				L("downgrading database from PostgreSQL %[1]d to %[2]d is not supported"), oldPgVersion, newPgVersion)
+		}
+
+		// Run DB finalization job
+		schemaUpdateRequired := oldPgVersion != newPgVersion
+		jobName, err := StartDBFinalizeJob(
+			namespace, serverImage, flags.Image.PullPolicy, pullSecret, schemaUpdateRequired, isMigration,
+		)
+		if err != nil {
+			return err
+		}
+
+		// Wait forever for the job to finish: the duration of this job depends on the amount of data to reindex
+		if err := kubernetes.WaitForJob(namespace, jobName, -1); err != nil {
+			return err
+		}
+
+		// Run the Post Upgrade job
+		jobName, err = StartPostUpgradeJob(namespace, serverImage, flags.Image.PullPolicy, pullSecret)
+		if err != nil {
+			return err
+		}
+
+		if err := kubernetes.WaitForJob(namespace, jobName, 60); err != nil {
+			return err
+		}
+	}
+
+	// Extract some data from the cluster to guess how to configure Uyuni.
+	clusterInfos, err := kubernetes.CheckCluster()
+	if err != nil {
+		return err
+	}
+
+	if replicas := kubernetes.GetReplicas(namespace, ServerDeployName); replicas > 0 && !flags.HubXmlrpc.IsChanged {
+		// Upgrade: detect the number of existing hub xmlrpc replicas
+		flags.HubXmlrpc.Replicas = replicas
+	}
+	needsHub := flags.HubXmlrpc.Replicas > 0
+
+	// Install the traefik / nginx config on the node
+	// This will never be done in an operator.
+	if err := deployNodeConfig(namespace, clusterInfos, needsHub, flags.Installation.Debug.Java); err != nil {
+		return err
+	}
+
+	// Deploy the SSL CA and server certificates
+	var caIssuer string
+	if flags.Installation.SSL.UseExisting() {
+		if err := DeployExistingCertificate(flags.Helm.Uyuni.Namespace, &flags.Installation.SSL); err != nil {
+			return err
+		}
+	} else if !HasIssuer(namespace, kubernetes.CaIssuerName) {
+		// cert-manager is not required for 3rd party certificates, only if we have the CA key.
+		// Note that an operator wouldn't be able to install cert-manager; it would just wait for it to be installed.
+		kubeconfig := clusterInfos.GetKubeconfig()
+
+		if err := InstallCertManager(&flags.Helm, kubeconfig, flags.Image.PullPolicy); err != nil {
+			return utils.Errorf(err, L("cannot install cert manager"))
+		}
+
+		if flags.Installation.SSL.UseMigratedCa() {
+			// Convert CA to RSA to use in a Kubernetes TLS secret.
+			// In an operator we would have to fail now if there is no SSL password as we cannot prompt it.
+			rootCA, err := os.ReadFile(flags.Installation.SSL.Ca.Root)
+			if err != nil {
+				return utils.Errorf(err, L("failed to read Root CA file"))
+			}
+			ca := types.SSLPair{
+				Key: base64.StdEncoding.EncodeToString(
+					ssl.GetRsaKey(flags.Installation.SSL.Ca.Key, flags.Installation.SSL.Password),
+				),
+				Cert: base64.StdEncoding.EncodeToString(ssl.StripTextFromCertificate(string(rootCA))),
+			}
+
+			// Install the cert-manager issuers
+			if err := DeployReusedCa(namespace, &ca); err != nil {
+				return err
+			}
+		} else {
+			if err := DeployGeneratedCa(flags.Helm.Uyuni.Namespace, &flags.Installation.SSL, fqdn); err != nil {
+				return err
+			}
+		}
+
+		// Wait for issuer to be ready
+		if err := waitForIssuer(flags.Helm.Uyuni.Namespace, kubernetes.CaIssuerName); err != nil {
+			return err
+		}
+
+		// Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret
+		if err := extractCaCertToConfig(flags.Helm.Uyuni.Namespace); err != nil {
+			return err
+		}
+		caIssuer = kubernetes.CaIssuerName
+	}
+
+	// Create the Ingress routes before the deployments as those are triggering
+	// the creation of the uyuni-cert secret from cert-manager.
+	if err := CreateIngress(namespace, fqdn, caIssuer, clusterInfos.Ingress); err != nil {
+		return err
+	}
+
+	// Wait for uyuni-cert secret to be ready
+	kubernetes.WaitForSecret(namespace, CertSecretName)
+
+	// Start the server
+	if err := CreateServerDeployment(
+		namespace, serverImage, flags.Image.PullPolicy, flags.Installation.TZ, flags.Installation.Debug.Java,
+		flags.Volumes.Mirror, pullSecret,
+	); err != nil {
+		return err
+	}
+
+	// Create the services
+	if err := CreateServices(namespace, flags.Installation.Debug.Java); err != nil {
+		return err
+	}
+
+	if clusterInfos.Ingress == "traefik" {
+		// Create the Traefik routes
+		if err := CreateTraefikRoutes(namespace, needsHub, flags.Installation.Debug.Java); err != nil {
+			return err
+		}
+	}
+
+	// Wait for the server deployment to have a running pod before trying to set it up.
+	if err := kubernetes.WaitForRunningDeployment(namespace, ServerDeployName); err != nil {
+		return err
+	}
+
+	// Run the setup only if it hasn't been done before: this is a one-off task.
+	// TODO Ideally we would need a job running at an earlier stage to persist the logs in a kubernetes-friendly way.
+	if neverSetup(namespace, serverImage, flags.Image.PullPolicy, pullSecret) {
+		if err := adm_utils.RunSetup(
+			cnx, &flags.ServerFlags, fqdn, map[string]string{"NO_SSL": "Y"},
+		); err != nil {
+			if stopErr := kubernetes.Stop(namespace, kubernetes.ServerApp); stopErr != nil {
+				log.Error().Msgf(L("Failed to stop service: %v"), stopErr)
+			}
+			return err
+		}
+	}
+
+	// Store the DB credentials in a secret.
+	if flags.Installation.DB.User != "" && flags.Installation.DB.Password != "" {
+		if err := CreateDBSecret(
+			namespace, DBSecret, flags.Installation.DB.User, flags.Installation.DB.Password,
+		); err != nil {
+			return err
+		}
+	}
+
+	deploymentsStarting := []string{}
+
+	// Start the Coco Deployments if requested.
+	if replicas := kubernetes.GetReplicas(namespace, CocoDeployName); replicas != 0 && !flags.Coco.IsChanged {
+		// Upgrade: detect the number of running coco replicas
+		flags.Coco.Replicas = replicas
+	}
+	if flags.Coco.Replicas > 0 {
+		cocoImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.Coco.Image)
+		if err != nil {
+			return err
+		}
+		if err := StartCocoDeployment(
+			namespace, cocoImage, flags.Image.PullPolicy, pullSecret, flags.Coco.Replicas,
+			flags.Installation.DB.Port, flags.Installation.DB.Name,
+		); err != nil {
+			return err
+		}
+		deploymentsStarting = append(deploymentsStarting, CocoDeployName)
+	}
+
+	// With an operator in mind, the user would just change the custom resource to enable the feature.
+	if needsHub {
+		// Install Hub API deployment, service
+		hubAPIImage, err := utils.ComputeImage(flags.Image.Registry, flags.Image.Tag, flags.HubXmlrpc.Image)
+		if err != nil {
+			return err
+		}
+		if err := InstallHubAPI(namespace, hubAPIImage, flags.Image.PullPolicy, pullSecret); err != nil {
+			return err
+		}
+		deploymentsStarting = append(deploymentsStarting, HubAPIDeployName)
+	}
+
+	// Wait for all the other deployments to be ready
+	if err := kubernetes.WaitForDeployments(namespace, deploymentsStarting...); err != nil {
+		return err
+	}
+
+	return nil
+}
diff --git a/mgradm/shared/kubernetes/traefik.go b/mgradm/shared/kubernetes/traefik.go
index 914f260ef..7577369fb 100644
--- a/mgradm/shared/kubernetes/traefik.go
+++ b/mgradm/shared/kubernetes/traefik.go
@@ -57,7 +57,7 @@ spec:
 	}
 
 	// Write the routes from the endpoint to the services
-	for _, endpoint := range GetPortLists(hub, debug) {
+	for _, endpoint := range getPortList(hub, debug) {
 		_, err := file.WriteString("---\n")
 		if err != nil {
 			return utils.Errorf(err, L("failed to write traefik middleware and routes to file"))
diff --git a/mgradm/shared/templates/mgrSetupScriptTemplate.go b/mgradm/shared/templates/mgrSetupScriptTemplate.go
index 2c17b5707..3cd70ada8 100644
--- a/mgradm/shared/templates/mgrSetupScriptTemplate.go
+++ b/mgradm/shared/templates/mgrSetupScriptTemplate.go
@@ -24,6 +24,9 @@ echo 'JAVA_OPTS=" $JAVA_OPTS -Xdebug -Xrunjdwp:transport=dt_socket,address=*:800
 /usr/lib/susemanager/bin/mgr-setup -s -n
 RESULT=$?
 
+# The CA needs to be added to the database for Kickstart use.
+/usr/bin/rhn-ssl-dbstore --ca-cert=/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT
+
 # clean before leaving
 rm $0
 exit $RESULT
diff --git a/mgradm/shared/utils/cmd_utils.go b/mgradm/shared/utils/cmd_utils.go
index 4d4016f36..b978c96b4 100644
--- a/mgradm/shared/utils/cmd_utils.go
+++ b/mgradm/shared/utils/cmd_utils.go
@@ -18,7 +18,12 @@ var defaultImage = path.Join(utils.DefaultRegistry, "server")
 
 // UseExisting return true if existing SSL Cert can be used.
 func (f *InstallSSLFlags) UseExisting() bool {
-	return f.Server.Cert != "" && f.Server.Key != "" && f.Ca.Root != ""
+	return f.Server.Cert != "" && f.Server.Key != "" && f.Ca.Root != "" && f.Ca.Key == ""
+}
+
+// UseMigratedCa returns true if a migrated CA and key can be used.
+func (f *InstallSSLFlags) UseMigratedCa() bool {
+	return f.Ca.Root != "" && f.Ca.Key != ""
 }
 
 // CheckParameters checks that all the required flags are passed if using 3rd party certificates.
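Taken together, UseExisting and UseMigratedCa split the SSL input into three mutually exclusive modes, which is how Reconcile picks a certificate deployment path earlier in this patch. The following is a minimal sketch of that selection logic, using a hypothetical sslMode helper that is not part of the patch:

// Sketch only: maps the InstallSSLFlags helpers to the three certificate
// deployment paths used by Reconcile. sslMode is a hypothetical name.
func sslMode(f *InstallSSLFlags) string {
	switch {
	case f.UseExisting():
		// Third-party server certificate: cert, key and root CA given, but no CA key.
		return "existing-certificate"
	case f.UseMigratedCa():
		// CA migrated from a previous server: root CA and its key, handed to cert-manager.
		return "migrated-ca"
	default:
		// Nothing usable provided: let cert-manager generate a new CA.
		return "generated-ca"
	}
}

The two helpers cannot both be true since UseExisting requires an empty CA key and UseMigratedCa requires a non-empty one.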
diff --git a/shared/connection.go b/shared/connection.go
index b28e6bd2a..9beb6ea67 100644
--- a/shared/connection.go
+++ b/shared/connection.go
@@ -6,7 +6,6 @@ package shared
 
 import (
 	"bytes"
-	"encoding/json"
 	"errors"
 	"fmt"
 	"os"
@@ -73,8 +72,8 @@ func (c *Connection) GetCommand() (string, error) {
 		if err == nil {
 			hasKubectl = true
 			if out, err := utils.RunCmdOutput(
-				zerolog.DebugLevel, "kubectl", "--request-timeout=30s", "get", "pod", c.kubernetesFilter, "-A",
-				"-o=jsonpath={.items[*].metadata.name}",
+				zerolog.DebugLevel, "kubectl", "--request-timeout=30s", "get", "deploy", c.kubernetesFilter,
+				"-A", "-o=jsonpath={.items[*].metadata.name}",
 			); err != nil {
 				log.Info().Msg(L("kubectl not configured to connect to a cluster, ignoring"))
 			} else if len(bytes.TrimSpace(out)) != 0 {
@@ -154,23 +153,17 @@ func (c *Connection) GetNamespace(appName string, filters ...string) (string, er
 		}
 	}
 
-	// retrieving namespace from helm release
-	clusterInfos, clusterInfosErr := kubernetes.CheckCluster()
-	if clusterInfosErr != nil {
-		return "", utils.Errorf(clusterInfosErr, L("failed to discover the cluster type"))
-	}
-
-	kubeconfig := clusterInfos.GetKubeconfig()
-	if !kubernetes.HasHelmRelease(appName, kubeconfig) {
-		return "", fmt.Errorf(L("no %s helm release installed on the cluster"), appName)
-	}
-
-	var namespaceErr error
-	c.namespace, namespaceErr = extractNamespaceFromConfig(appName, kubeconfig, filters...)
-	if namespaceErr != nil {
-		return "", utils.Errorf(namespaceErr, L("failed to find the %s deployment namespace"), appName)
+	// retrieving namespace from the first installed object we can find matching the filter.
+	// This assumes that the server or proxy has been installed only in one namespace
+	// with the current cluster credentials.
+	out, err := utils.RunCmdOutput(
+		zerolog.DebugLevel, "kubectl", "get", "all", "-A", c.kubernetesFilter,
+		"-o", "jsonpath={.items[*].metadata.namespace}",
+	)
+	if err != nil {
+		return "", utils.Errorf(err, L("failed to guess the namespace"))
 	}
-
+	c.namespace = strings.TrimSpace(strings.Split(string(out), " ")[0])
 	return c.namespace, nil
 }
 
@@ -384,6 +377,11 @@ func (c *Connection) TestExistenceInPod(dstpath string) bool {
 	case "podman":
 		commandArgs = append(commandArgs, "test", "-e", dstpath)
 	case "kubectl":
+		namespace, err := c.GetNamespace("")
+		if err != nil {
+			log.Fatal().Err(err).Msg(L("failed to detect the namespace"))
+		}
+		commandArgs = append(commandArgs, "-n", namespace)
 		commandArgs = append(commandArgs, "-c", "uyuni", "test", "-e", dstpath)
 	default:
 		log.Fatal().Msgf(L("unknown container kind: %s"), command)
@@ -524,33 +522,3 @@ func (c *Connection) RunSupportConfig(tmpDir string) ([]string, error) {
 	}
 	return files, nil
 }
-
-// extractNamespaceFromConfig extracts the namespace of a given application
-// from the Helm release information.
-func extractNamespaceFromConfig(appName string, kubeconfig string, filters ...string) (string, error) {
-	args := []string{}
-	if kubeconfig != "" {
-		args = append(args, "--kubeconfig", kubeconfig)
-	}
-	args = append(args, "list", "-aA", "-f", appName, "-o", "json")
-	args = append(args, filters...)
-
-	out, err := utils.RunCmdOutput(zerolog.DebugLevel, "helm", args...)
-	if err != nil {
-		return "", utils.Errorf(err, L("failed to detect %s's namespace using helm"), appName)
-	}
-
-	var data []releaseInfo
-	if err = json.Unmarshal(out, &data); err != nil {
-		return "", utils.Errorf(err, L("helm provided an invalid JSON output"))
-	}
-
-	if len(data) == 1 {
-		return data[0].Namespace, nil
-	}
-	return "", errors.New(L("found no or more than one deployment"))
-}
-
-type releaseInfo struct {
-	Namespace string `mapstructure:"namespace"`
-}
diff --git a/shared/kubernetes/deploy.go b/shared/kubernetes/deploy.go
new file mode 100644
index 000000000..a69489942
--- /dev/null
+++ b/shared/kubernetes/deploy.go
@@ -0,0 +1,38 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/rs/zerolog"
+)
+
+// HasDeployment returns true when a deployment matching the kubectl get filter exists in the namespace.
+func HasDeployment(namespace string, filter string) bool {
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "deploy", "-n", namespace, filter, "-o", "name")
+	if err == nil && strings.TrimSpace(string(out)) != "" {
+		return true
+	}
+	return false
+}
+
+// GetReplicas returns the number of replicas of a deployment.
+//
+// If no such deployment exists, 0 will be returned as if there was a deployment scaled down to 0.
+func GetReplicas(namespace string, name string) int {
+	out, err := runCmdOutput(zerolog.DebugLevel,
+		"kubectl", "get", "deploy", "-n", namespace, name, "-o", "jsonpath={.status.replicas}",
+	)
+	if err != nil {
+		return 0
+	}
+	replicas, err := strconv.Atoi(strings.TrimSpace(string(out)))
+	if err != nil {
+		return 0
+	}
+	return replicas
+}
diff --git a/shared/kubernetes/deploy_test.go b/shared/kubernetes/deploy_test.go
new file mode 100644
index 000000000..8e925b2ea
--- /dev/null
+++ b/shared/kubernetes/deploy_test.go
@@ -0,0 +1,58 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package kubernetes
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/rs/zerolog"
+	"github.com/uyuni-project/uyuni-tools/shared/testutils"
+)
+
+func TestHasDeployment(t *testing.T) {
+	type dataType struct {
+		out      string
+		err      error
+		expected bool
+	}
+
+	data := []dataType{
+		{"deployment.apps/traefik\n", nil, true},
+		{"\n", nil, false},
+		{"Some error", errors.New("Some error"), false},
+	}
+
+	for i, test := range data {
+		runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) {
+			return []byte(test.out), test.err
+		}
+		testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i+1), test.expected,
+			HasDeployment("kube-system", "-lapp.kubernetes.io/name=traefik"),
+		)
+	}
+}
+
+func TestGetReplicas(t *testing.T) {
+	type dataType struct {
+		out      string
+		err      error
+		expected int
+	}
+	data := []dataType{
+		{"2\n", nil, 2},
+		{"no such deploy\n", errors.New("No such deploy"), 0},
+		{"invalid output\n", nil, 0},
+	}
+
+	for i, test := range data {
+		runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) {
+			return []byte(test.out), test.err
+		}
+		testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i+1),
+			test.expected, GetReplicas("uyuni", "uyuni-hub-api"))
+	}
+}
diff --git a/shared/kubernetes/inspect.go b/shared/kubernetes/inspect.go
new file mode 100644
index 000000000..b168faa50
--- /dev/null
+++ b/shared/kubernetes/inspect.go
@@ -0,0 +1,52 @@
+// SPDX-FileCopyrightText: 2024 SUSE LLC
+//
+// SPDX-License-Identifier: Apache-2.0
+
+//go:build !nok8s
+
+package kubernetes
+
+import (
+	. "github.com/uyuni-project/uyuni-tools/shared/l10n"
+	"github.com/uyuni-project/uyuni-tools/shared/types"
+	"github.com/uyuni-project/uyuni-tools/shared/utils"
+)
+
+// InspectServer checks values on a given image and deployment.
+func InspectServer(
+	namespace string,
+	serverImage string,
+	pullPolicy string,
+	pullSecret string,
+) (*utils.ServerInspectData, error) {
+	podName := "uyuni-image-inspector"
+
+	tempDir, cleaner, err := utils.TempDir()
+	if err != nil {
+		return nil, err
+	}
+	defer cleaner()
+	inspector := utils.NewServerInspector(tempDir)
+	// We need the inspector to write to the pod's logs instead of a file
+	inspector.DataPath = "/dev/stdout"
+	script, err := inspector.GenerateScriptString()
+	if err != nil {
+		return nil, err
+	}
+
+	out, err := RunPodLogs(
+		namespace, podName, serverImage, pullPolicy, pullSecret,
+		[]types.VolumeMount{utils.EtcRhnVolumeMount, utils.VarPgsqlVolumeMount},
+		"sh", "-c", script,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the data
+	inspectedData, err := utils.ReadInspectDataString[utils.ServerInspectData]([]byte(out))
+	if err != nil {
+		return nil, utils.Errorf(err, L("failed to parse the inspected data"))
+	}
+	return inspectedData, nil
+}
diff --git a/shared/kubernetes/k3s.go b/shared/kubernetes/k3s.go
index f2865fa6f..53b1d53a1 100644
--- a/shared/kubernetes/k3s.go
+++ b/shared/kubernetes/k3s.go
@@ -8,8 +8,6 @@ import (
 	"errors"
 	"fmt"
 	"os"
-	"os/exec"
-	"path"
 	"time"
 
 	"github.com/rs/zerolog"
@@ -105,73 +103,3 @@ func UninstallK3sTraefikConfig(dryRun bool) {
 	// Now that it's reinstalled, remove the file
 	utils.UninstallFile(k3sTraefikConfigPath, dryRun)
 }
-
-// InspectKubernetes check values on a given image and deploy.
-func InspectKubernetes(namespace string, serverImage string, pullPolicy string) (*utils.ServerInspectData, error) { - for _, binary := range []string{"kubectl", "helm"} { - if _, err := exec.LookPath(binary); err != nil { - return nil, fmt.Errorf(L("install %s before running this command"), binary) - } - } - - scriptDir, cleaner, err := utils.TempDir() - if err != nil { - return nil, err - } - defer cleaner() - - inspector := utils.NewServerInspector(scriptDir) - if err := inspector.GenerateScript(); err != nil { - return nil, err - } - - command := path.Join(utils.InspectContainerDirectory, utils.InspectScriptFilename) - - const podName = "inspector" - - // delete pending pod and then check the node, because in presence of more than a pod GetNode return is wrong - if err := DeletePod(namespace, podName, ServerFilter); err != nil { - return nil, utils.Errorf(err, L("cannot delete %s"), podName) - } - - // this is needed because folder with script needs to be mounted - nodeName, err := GetNode(namespace, ServerFilter) - if err != nil { - return nil, utils.Errorf(err, L("cannot find node running uyuni")) - } - - // generate deploy data - deployData := types.Deployment{ - APIVersion: "v1", - Spec: &types.Spec{ - RestartPolicy: "Never", - NodeName: nodeName, - Containers: []types.Container{ - { - Name: podName, - VolumeMounts: append(utils.PgsqlRequiredVolumeMounts, - types.VolumeMount{MountPath: "/var/lib/uyuni-tools", Name: "var-lib-uyuni-tools"}), - Image: serverImage, - }, - }, - Volumes: append(utils.PgsqlRequiredVolumes, - types.Volume{Name: "var-lib-uyuni-tools", HostPath: &types.HostPath{Path: scriptDir, Type: "Directory"}}), - }, - } - // transform deploy data in JSON - override, err := GenerateOverrideDeployment(deployData) - if err != nil { - return nil, err - } - err = RunPod(namespace, podName, ServerFilter, serverImage, pullPolicy, command, override) - if err != nil { - return nil, utils.Errorf(err, L("cannot run inspect pod")) - } - - inspectResult, err := inspector.ReadInspectData() - if err != nil { - return nil, utils.Errorf(err, L("cannot inspect data")) - } - - return inspectResult, err -} diff --git a/shared/kubernetes/kubernetes.go b/shared/kubernetes/kubernetes.go index fa2986f66..64cb3f4cc 100644 --- a/shared/kubernetes/kubernetes.go +++ b/shared/kubernetes/kubernetes.go @@ -200,6 +200,13 @@ func AddSCCSecret(helmArgs []string, namespace string, scc *types.SCCCredentials func GetSCCSecret(namespace string, scc *types.SCCCredentials, appLabel string) (string, error) { const secretName = "scc-credentials" + // Return the existing secret if any. + out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "-n", namespace, "secret", secretName, "-o", "name") + if err == nil && strings.TrimSpace(string(out)) != "" { + return secretName, nil + } + + // Create the secret if SCC user and password are passed. if scc.User != "" && scc.Password != "" { if err := createDockerSecret( namespace, secretName, "registry.suse.com", scc.User, scc.Password, appLabel, diff --git a/shared/kubernetes/pvc.go b/shared/kubernetes/pvc.go index d0a3c8cb6..b14736ccd 100644 --- a/shared/kubernetes/pvc.go +++ b/shared/kubernetes/pvc.go @@ -262,3 +262,14 @@ func CreateVolumes(mounts []types.VolumeMount) []core.Volume { } var runCmdOutput = utils.RunCmdOutput + +// HasVolume returns true if the pvcName persistent volume claim is bound. 
+func HasVolume(namespace string, pvcName string) bool { + out, err := runCmdOutput( + zerolog.DebugLevel, "kubectl", "get", "pvc", "-n", namespace, pvcName, "-o", "jsonpath={.status.phase}", + ) + if err != nil { + return false + } + return strings.TrimSpace(string(out)) == "Bound" +} diff --git a/shared/kubernetes/pvc_test.go b/shared/kubernetes/pvc_test.go index cdd2ddff1..58879270c 100644 --- a/shared/kubernetes/pvc_test.go +++ b/shared/kubernetes/pvc_test.go @@ -13,6 +13,27 @@ import ( "github.com/uyuni-project/uyuni-tools/shared/testutils" ) +func TestHasVolume(t *testing.T) { + type dataType struct { + err error + out string + expected bool + } + data := []dataType{ + {nil, "Bound\n", true}, + {nil, "Pending\n", false}, + {errors.New("PVC not found"), "", false}, + } + + for i, test := range data { + runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) { + return []byte(test.out), test.err + } + actual := HasVolume("myns", "thepvc") + testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected output", i), test.expected, actual) + } +} + func TestHasPersistentVolumeClaim(t *testing.T) { type dataType struct { err error diff --git a/shared/kubernetes/utils.go b/shared/kubernetes/utils.go index 5fa832db6..07910d3d7 100644 --- a/shared/kubernetes/utils.go +++ b/shared/kubernetes/utils.go @@ -92,6 +92,39 @@ func WaitForDeployments(namespace string, names ...string) error { return nil } +// WaitForRunningDeployment waits for a deployment to have at least one replica in running state. +func WaitForRunningDeployment(namespace string, name string) error { + log.Info().Msgf(L("Waiting for %[1]s deployment to be started in %[2]s namespace\n"), name, namespace) + for { + pods, err := getPodsForDeployment(namespace, name) + if err != nil { + return err + } + + if len(pods) > 0 { + jsonPath := "jsonpath={.status.containerStatuses[*].state.running.startedAt}" + if len(pods) > 1 { + jsonPath = "jsonpath={.items[*].status.containerStatuses[*].state.running.startedAt}" + } + out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", "get", "pod", "-n", namespace, + "-o", jsonPath, + strings.Join(pods, " "), + ) + if err != nil { + return utils.Errorf(err, L("failed to check if the deployment has running pods")) + } + if strings.TrimSpace(string(out)) != "" { + break + } + if err := hasAllPodsFailed(namespace, pods, name); err != nil { + return err + } + } + time.Sleep(1 * time.Second) + } + return nil +} + // IsDeploymentReady returns true if a kubernetes deployment has at least one ready replica. // // An empty namespace means searching through all the namespaces. 
@@ -108,40 +141,103 @@ func IsDeploymentReady(namespace string, name string) (bool, error) { } } - // Search for the replica set matching the deployment + pods, err := getPodsForDeployment(namespace, name) + if err != nil { + return false, err + } + + if err := hasAllPodsFailed(namespace, pods, name); err != nil { + return false, err + } + + return false, nil +} + +func hasAllPodsFailed(namespace string, names []string, deployment string) error { + failedPods := 0 + for _, podName := range names { + if failed, err := isPodFailed(namespace, podName); err != nil { + return err + } else if failed { + failedPods = failedPods + 1 + } + } + if len(names) > 0 && failedPods == len(names) { + return fmt.Errorf(L("all the pods of %s deployment have a failure"), deployment) + } + return nil +} + +func getPodsForDeployment(namespace string, name string) ([]string, error) { + rs, err := getCurrentDeploymentReplicaSet(namespace, name) + if err != nil { + return []string{}, err + } + + // Check if all replica set pods have failed to start + return getPodsFromOwnerReference(namespace, rs) +} + +func getCurrentDeploymentReplicaSet(namespace string, name string) (string, error) { + // Get the replicasets matching the deployments and their revision as + // Kubernetes doesn't remove the old replicasets after update. + revisionPath := "{.metadata.annotations['deployment\\.kubernetes\\.io/revision']}" rsArgs := []string{ "get", "rs", "-o", - fmt.Sprintf("jsonpath={.items[?(@.metadata.ownerReferences[0].name=='%s')].metadata.name}", name), + fmt.Sprintf( + "jsonpath={range .items[?(@.metadata.ownerReferences[0].name=='%s')]}{.metadata.name},%s {end}", + name, revisionPath, + ), } rsArgs = addNamespace(rsArgs, namespace) - out, err = utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", rsArgs...) + out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", rsArgs...) if err != nil { - return false, utils.Errorf(err, L("failed to find ReplicaSet for deployment %s"), name) + return "", utils.Errorf(err, L("failed to list ReplicaSets for deployment %s"), name) + } + replicasetsOut := strings.TrimSpace(string(out)) + // No replica, no deployment + if replicasetsOut == "" { + return "", nil } - rs := strings.TrimSpace(string(out)) - // Check if all replica set pods have failed to start - jsonpath = fmt.Sprintf("jsonpath={.items[?(@.metadata.ownerReferences[0].name=='%s')].metadata.name}", rs) - podArgs := []string{"get", "pod", "-o", jsonpath} - podArgs = addNamespace(podArgs, namespace) - out, err = utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", podArgs...) 
+	// Get the current deployment revision to look for
+	out, err = runCmdOutput(zerolog.DebugLevel, "kubectl", "get", "deploy", "-n", namespace, name,
+		"-o", "jsonpath="+revisionPath,
+	)
 	if err != nil {
-		return false, utils.Errorf(err, L("failed to find pods for %s deployment"), name)
+		return "", utils.Errorf(err, L("failed to get the %s deployment revision"), name)
 	}
-	pods := strings.Split(string(out), " ")
-	failedPods := 0
-	for _, podName := range pods {
-		if failed, err := isPodFailed(namespace, podName); err != nil {
-			return false, err
-		} else if failed {
-			failedPods = failedPods + 1
+	revision := strings.TrimSpace(string(out))
+
+	replicasets := strings.Split(replicasetsOut, " ")
+	for _, rs := range replicasets {
+		data := strings.SplitN(rs, ",", 2)
+		if len(data) != 2 {
+			return "", fmt.Errorf(L("invalid replicaset response: %s"), replicasetsOut)
+		}
+		if data[1] == revision {
+			return data[0], nil
 		}
 	}
-	if failedPods == len(pods) {
-		return false, fmt.Errorf(L("all the pods of %s deployment have a failure"), name)
+	return "", nil
+}
+
+func getPodsFromOwnerReference(namespace string, owner string) ([]string, error) {
+	jsonpath := fmt.Sprintf("jsonpath={.items[?(@.metadata.ownerReferences[0].name=='%s')].metadata.name}", owner)
+	podArgs := []string{"get", "pod", "-o", jsonpath}
+	podArgs = addNamespace(podArgs, namespace)
+	out, err := runCmdOutput(zerolog.DebugLevel, "kubectl", podArgs...)
+	if err != nil {
+		return []string{}, utils.Errorf(err, L("failed to find pods for owner reference %s"), owner)
 	}
-	return false, nil
+	outStr := strings.TrimSpace(string(out))
+
+	pods := []string{}
+	if outStr != "" {
+		pods = strings.Split(outStr, " ")
+	}
+	return pods, nil
 }
 
 // isPodFailed checks if any of the containers of the pod are in BackOff state.
@@ -190,35 +286,22 @@ func GetDeploymentStatus(namespace string, name string) (*DeploymentStatus, erro
 	return &status, nil
 }
 
-// ReplicasTo set the replica for an app to the given value.
-// Scale the number of replicas of the server.
-func ReplicasTo(namespace string, app string, replica uint) error {
-	args := []string{"scale", "deploy", app, "--replicas"}
-	log.Debug().Msgf("Setting replicas for pod in %s to %d", app, replica)
-	args = append(args, fmt.Sprint(replica), "-n", namespace)
+// ReplicasTo sets the replicas for a deployment to the given value.
+func ReplicasTo(namespace string, name string, replica uint) error {
+	args := []string{"scale", "-n", namespace, "deploy", name, "--replicas", strconv.FormatUint(uint64(replica), 10)}
+	log.Debug().Msgf("Setting replicas for deployment in %s to %d", name, replica)
 	_, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", args...)
if err != nil { return utils.Errorf(err, L("cannot run kubectl %s"), args) } - pods, err := GetPods(namespace, "-l"+AppLabel+"="+app) - if err != nil { - return utils.Errorf(err, L("cannot get pods for %s"), app) + if err := waitForReplicas(namespace, name, replica); err != nil { + return err } - for _, pod := range pods { - if len(pod) > 0 { - err = waitForReplica(namespace, pod, replica) - if err != nil { - return utils.Errorf(err, L("replica to %d failed"), replica) - } - } - } - - log.Debug().Msgf("Replicas for pod in %s are now %d", app, replica) - - return err + log.Debug().Msgf("Replicas for %s deployment in %s are now %d", name, namespace, replica) + return nil } func isPodRunning(namespace string, podname string, filter string) (bool, error) { @@ -246,36 +329,11 @@ func GetPods(namespace string, filter string) (pods []string, err error) { return pods, err } -func waitForReplicaZero(namespace string, podname string) error { - waitSeconds := 120 - cmdArgs := []string{"get", "pod", podname, "-n", namespace} - - for i := 0; i < waitSeconds; i++ { - out, err := utils.RunCmdOutput(zerolog.DebugLevel, "kubectl", cmdArgs...) - /* Assume that if the command return an error at the first iteration, it's because it failed, - * next iteration because the pod was actually deleted - */ - if err != nil && i == 0 { - return utils.Errorf(err, L("cannot get pod informations %s"), podname) - } - outStr := strings.TrimSuffix(string(out), "\n") - if len(outStr) == 0 { - log.Debug().Msgf("Pod %s has been deleted", podname) - return nil - } - time.Sleep(1 * time.Second) - } - return fmt.Errorf(L("cannot set replicas for %s to zero"), podname) -} - -func waitForReplica(namespace string, podname string, replica uint) error { +func waitForReplicas(namespace string, name string, replicas uint) error { waitSeconds := 120 - log.Debug().Msgf("Checking replica for %s ready to %d", podname, replica) - if replica == 0 { - return waitForReplicaZero(namespace, podname) - } + log.Debug().Msgf("Checking replica for %s ready to %d", name, replicas) cmdArgs := []string{ - "get", "pod", podname, "-n", namespace, "--output=custom-columns=STATUS:.status.phase", "--no-headers", + "get", "deploy", name, "-n", namespace, "-o", "jsonpath={.status.readyReplicas}", "--no-headers", } for i := 0; i < waitSeconds; i++ { @@ -283,12 +341,18 @@ func waitForReplica(namespace string, podname string, replica uint) error { if err != nil { return utils.Errorf(err, L("cannot execute %s"), strings.Join(cmdArgs, string(" "))) } - outStr := strings.TrimSuffix(string(out), "\n") - if string(outStr) == "Running" { - log.Debug().Msgf("%s pod replica is now %d", podname, replica) - break + outStr := strings.TrimSpace(string(out)) + var readyReplicas uint64 + if outStr != "" { + var err error + readyReplicas, err = strconv.ParseUint(outStr, 10, 8) + if err != nil { + return utils.Errorf(err, L("invalid replicas result")) + } + } + if uint(readyReplicas) == replicas { + return nil } - log.Debug().Msgf("Pod %s replica is %s in %d seconds.", podname, string(out), i) time.Sleep(1 * time.Second) } return nil diff --git a/shared/kubernetes/utils_test.go b/shared/kubernetes/utils_test.go new file mode 100644 index 000000000..a4a25d5ca --- /dev/null +++ b/shared/kubernetes/utils_test.go @@ -0,0 +1,124 @@ +// SPDX-FileCopyrightText: 2024 SUSE LLC +// +// SPDX-License-Identifier: Apache-2.0 + +package kubernetes + +import ( + "errors" + "fmt" + "strings" + "testing" + + "github.com/rs/zerolog" + "github.com/uyuni-project/uyuni-tools/shared/testutils" + 
"github.com/uyuni-project/uyuni-tools/shared/utils" +) + +func TestGetCurrentDeploymentReplicaSet(t *testing.T) { + type testType struct { + rsOut string + rsErr error + revisionOut string + revisionErr error + expected string + expectedError bool + } + + testCases := []testType{ + { + rsOut: "uyuni-64d597fccf,1 uyuni-66f7677dc6,2\n", + rsErr: nil, + revisionOut: "2\n", + revisionErr: nil, + expected: "uyuni-66f7677dc6", + expectedError: false, + }, + { + rsOut: "uyuni-64d597fccf,1\n", + rsErr: nil, + revisionOut: "1\n", + revisionErr: nil, + expected: "uyuni-64d597fccf", + expectedError: false, + }, + { + rsOut: "\n", + rsErr: nil, + revisionOut: "not found\n", + revisionErr: errors.New("not found"), + expected: "", + expectedError: false, + }, + { + rsOut: "get rs error\n", + rsErr: errors.New("get rs error"), + revisionOut: "1\n", + revisionErr: nil, + expected: "", + expectedError: true, + }, + { + rsOut: "uyuni-64d597fccf,1\n", + rsErr: nil, + revisionOut: "get rev error\n", + revisionErr: errors.New("get rev error"), + expected: "", + expectedError: true, + }, + } + + for i, test := range testCases { + runCmdOutput = func(_ zerolog.Level, _ string, args ...string) ([]byte, error) { + if utils.Contains(args, "rs") { + return []byte(test.rsOut), test.rsErr + } + return []byte(test.revisionOut), test.revisionErr + } + actual, err := getCurrentDeploymentReplicaSet("uyunins", "uyuni") + caseMsg := fmt.Sprintf("test %d: ", i+1) + testutils.AssertEquals(t, fmt.Sprintf("%sunexpected error raised: %s", caseMsg, err), + test.expectedError, err != nil, + ) + testutils.AssertEquals(t, caseMsg+"unexpected result", test.expected, actual) + } +} + +func TestGetPodsFromOwnerReference(t *testing.T) { + type testType struct { + out string + err error + expected []string + } + + data := []testType{ + { + out: "pod1 pod2 pod3\n", + err: nil, + expected: []string{"pod1", "pod2", "pod3"}, + }, + { + out: "\n", + err: nil, + expected: []string{}, + }, + { + out: "error\n", + err: errors.New("some error"), + expected: []string{}, + }, + } + + for i, test := range data { + runCmdOutput = func(_ zerolog.Level, _ string, _ ...string) ([]byte, error) { + return []byte(test.out), test.err + } + actual, err := getPodsFromOwnerReference("myns", "owner") + if test.err == nil { + testutils.AssertTrue(t, "Shouldn't have raise an error", err == nil) + } else { + testutils.AssertTrue(t, "Unexpected error raised", strings.Contains(err.Error(), test.err.Error())) + } + testutils.AssertEquals(t, fmt.Sprintf("test %d: unexpected result", i+1), test.expected, actual) + } +} diff --git a/shared/types/ssl.go b/shared/types/ssl.go index 5daf2bef8..f80b18371 100644 --- a/shared/types/ssl.go +++ b/shared/types/ssl.go @@ -20,6 +20,8 @@ type SSLCertGenerationFlags struct { type CaChain struct { Root string Intermediate []string + // Key is the CA key file in the case of a migration of a self-generate CA. + Key string } // SSLPair is a type for SSL Cert and Key. diff --git a/shared/utils/inspector.go b/shared/utils/inspector.go index c446466a7..78b21599e 100644 --- a/shared/utils/inspector.go +++ b/shared/utils/inspector.go @@ -8,6 +8,7 @@ import ( "bytes" "os" "path" + "strings" "github.com/rs/zerolog/log" "github.com/spf13/viper" @@ -46,6 +47,21 @@ type BaseInspector struct { Values []types.InspectData } +// GenerateScriptString creates the inspector script and returns it as a string. 
+func (i *BaseInspector) GenerateScriptString() (string, error) { + data := templates.InspectTemplateData{ + Param: i.Values, + OutputFile: i.GetDataPath(), + } + + scriptBuilder := new(strings.Builder) + if err := data.Render(scriptBuilder); err != nil { + return "", err + } + + return scriptBuilder.String(), nil +} + // GenerateScript is a common implementation for all inspectors. func (i *BaseInspector) GenerateScript() error { log.Debug().Msgf("Generating inspect script in %s", i.GetScriptPath()) diff --git a/shared/utils/volumes.go b/shared/utils/volumes.go index c0aefe242..84a2a7993 100644 --- a/shared/utils/volumes.go +++ b/shared/utils/volumes.go @@ -6,29 +6,23 @@ package utils import "github.com/uyuni-project/uyuni-tools/shared/types" +// EtcRhnVolumeMount defines the /etc/rhn volume mount. +var EtcRhnVolumeMount = types.VolumeMount{MountPath: "/etc/rhn", Name: "etc-rhn", Size: "1Mi"} + +// VarPgsqlVolumeMount defines the /var/lib/pgsql volume mount. +var VarPgsqlVolumeMount = types.VolumeMount{MountPath: "/var/lib/pgsql", Name: "var-pgsql", Size: "50Gi"} + +// RootVolumeMount defines the /root volume mount. +var RootVolumeMount = types.VolumeMount{MountPath: "/root", Name: "root", Size: "1Mi"} + // PgsqlRequiredVolumeMounts represents volumes mount used by PostgreSQL. var PgsqlRequiredVolumeMounts = []types.VolumeMount{ {MountPath: "/etc/pki/tls", Name: "etc-tls", Size: "1Mi"}, - {MountPath: "/var/lib/pgsql", Name: "var-pgsql", Size: "50Gi"}, - {MountPath: "/etc/rhn", Name: "etc-rhn", Size: "1Mi"}, + VarPgsqlVolumeMount, + EtcRhnVolumeMount, {MountPath: "/etc/pki/spacewalk-tls", Name: "tls-key"}, } -// PgsqlRequiredVolumes represents volumes used by PostgreSQL. -var PgsqlRequiredVolumes = []types.Volume{ - {Name: "etc-tls", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-tls"}}, - {Name: "var-pgsql", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-pgsql"}}, - {Name: "etc-rhn", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-rhn"}}, - {Name: "tls-key", - Secret: &types.Secret{ - SecretName: "uyuni-cert", Items: []types.SecretItem{ - {Key: "tls.crt", Path: "spacewalk.crt"}, - {Key: "tls.key", Path: "spacewalk.key"}, - }, - }, - }, -} - // etcServerVolumeMounts represents volumes mounted in /etc folder. var etcServerVolumeMounts = []types.VolumeMount{ {MountPath: "/etc/apache2", Name: "etc-apache2", Size: "1Mi"}, @@ -42,22 +36,7 @@ var etcServerVolumeMounts = []types.VolumeMount{ {MountPath: "/etc/sssd", Name: "etc-sssd", Size: "1Mi"}, } -// EtcServerVolumes represents volumes used for configuration. 
-var EtcServerVolumes = []types.Volume{ - {Name: "etc-apache2", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-apache2"}}, - {Name: "etc-systemd-multi", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-systemd-multi"}}, - {Name: "etc-systemd-sockets", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-systemd-sockets"}}, - {Name: "etc-salt", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-salt"}}, - {Name: "etc-tomcat", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-tomcat"}}, - {Name: "etc-cobbler", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-cobbler"}}, - {Name: "etc-sysconfig", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-sysconfig"}}, - {Name: "etc-postfix", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-postfix"}}, - {Name: "etc-rhn", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-rhn"}}, - {Name: "etc-sssd", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "etc-sssd"}}, -} - var etcAndPgsqlVolumeMounts = append(PgsqlRequiredVolumeMounts, etcServerVolumeMounts[:]...) -var etcAndPgsqlVolumes = append(PgsqlRequiredVolumes, EtcServerVolumes[:]...) // ServerVolumeMounts should match the volumes mapping from the container definition in both // the helm chart and the systemctl services definitions. @@ -75,28 +54,10 @@ var ServerVolumeMounts = append([]types.VolumeMount{ {MountPath: "/srv/pillar", Name: "srv-pillar", Size: "10Mi"}, {MountPath: "/srv/susemanager", Name: "srv-susemanager", Size: "1Mi"}, {MountPath: "/srv/spacewalk", Name: "srv-spacewalk", Size: "10Mi"}, - {MountPath: "/root", Name: "root", Size: "1Mi"}, + RootVolumeMount, {MountPath: "/etc/pki/trust/anchors/", Name: "ca-cert"}, }, etcAndPgsqlVolumeMounts[:]...) -// ServerVolumes match the volume with Persistent Volume Claim. -var ServerVolumes = append([]types.Volume{ - {Name: "var-cobbler", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-cobbler"}}, - {Name: "var-salt", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-salt"}}, - {Name: "var-cache", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-cache"}}, - {Name: "var-spacewalk", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-spacewalk"}}, - {Name: "var-log", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "var-log"}}, - {Name: "srv-salt", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-salt"}}, - {Name: "srv-www", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-www"}}, - {Name: "srv-tftpboot", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-tftpboot"}}, - {Name: "srv-formulametadata", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-formulametadata"}}, - {Name: "srv-pillar", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-pillar"}}, - {Name: "srv-susemanager", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-susemanager"}}, - {Name: "srv-spacewalk", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "srv-spacewalk"}}, - {Name: "root", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "root"}}, - {Name: "ca-cert", PersistentVolumeClaim: &types.PersistentVolumeClaim{ClaimName: "ca-cert"}}, -}, etcAndPgsqlVolumes[:]...) - // HubXmlrpcVolumeMounts represents volumes used by Hub Xmlrpc container. 
var HubXmlrpcVolumeMounts = []types.VolumeMount{
	{MountPath: "/etc/pki/trust/anchors", Name: "ca-cert"},

diff --git a/uyuni-tools.changes.cbosdo.k8s-refactoring b/uyuni-tools.changes.cbosdo.k8s-refactoring
new file mode 100644
index 000000000..847c0c1df
--- /dev/null
+++ b/uyuni-tools.changes.cbosdo.k8s-refactoring
@@ -0,0 +1 @@
+- Stop using a helm chart to install the server

From eba931808baba920da4e03789ac26d2d2cabbe3f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Fri, 8 Nov 2024 16:05:53 +0100
Subject: [PATCH 14/19] Migration script improvements

There is no need to run a potentially lengthy reindexing on minor upgrades,
only on major ones.

Don't call su with the `-` parameter, as it prints the warning message meant
for terminals... and that looks ugly in logs.
---
 mgradm/shared/kubernetes/dbFinalize.go                 | 2 +-
 mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go | 6 +++---
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/mgradm/shared/kubernetes/dbFinalize.go b/mgradm/shared/kubernetes/dbFinalize.go
index 37f2c2133..b096f9872 100644
--- a/mgradm/shared/kubernetes/dbFinalize.go
+++ b/mgradm/shared/kubernetes/dbFinalize.go
@@ -54,7 +54,7 @@ func getDBFinalizeJob(
 	// Prepare the script
 	scriptData := templates.FinalizePostgresTemplateData{
 		RunAutotune:     true,
-		RunReindex:      true,
+		RunReindex:      migration,
 		RunSchemaUpdate: schemaUpdateRequired,
 		Migration:       migration,
 		Kubernetes:      true,
diff --git a/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go b/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go
index d4c22592d..723a6a5f8 100644
--- a/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go
+++ b/mgradm/shared/templates/pgsqlFinalizeScriptTemplate.go
@@ -30,7 +30,7 @@ echo "Running smdba system-check autotuning..."
 smdba system-check autotuning
 {{ end }}
 echo "Starting Postgresql..."
-su -s /bin/bash - postgres -c "/usr/share/postgresql/postgresql-script start"
+HOME=/var/lib/pgsql PG_DATA=/var/lib/pgsql/data su -s /bin/bash -p postgres -c "/usr/share/postgresql/postgresql-script start"
 {{ if .RunReindex }}
 echo "Reindexing database. This may take a while, please do not cancel it!"
 database=$(sed -n "s/^\s*db_name\s*=\s*\([^ ]*\)\s*$/\1/p" /etc/rhn/rhn.conf)
@@ -50,7 +50,6 @@ UPDATE rhnKickstartableTree SET base_path = CONCAT('/srv/www/distributions/', ta
 from dist_map WHERE dist_map.base_path = rhnKickstartableTree.base_path;
 DROP TABLE dist_map;
 EOT
-{{ end }}
 
 echo "Schedule a system list update task..."
 spacewalk-sql --select-mode - <<EOT

From … Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Wed, 13 Nov 2024 11:59:01 +0100
Subject: [PATCH 15/19] Change the --helm-* parameters into --kubernetes-*

Since helm is no longer used to install Uyuni, but only cert-manager,
rename the flags. Also drop those that are no longer used for the
server after the refactoring.
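The renamed values land in the new KubernetesFlags struct through the project's usual viper/mapstructure wiring. A minimal, self-contained sketch of that mapping (the struct shape follows this series; the decode step and the values are simplified placeholders, not the actual parsing code):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

// ChartFlags mirrors the shape of shared/types.ChartFlags.
type ChartFlags struct {
	Namespace string
	Chart     string
	Version   string
	Values    string
}

// KubernetesFlags is the struct introduced by this patch.
type KubernetesFlags struct {
	Uyuni       ChartFlags
	CertManager ChartFlags
}

func main() {
	// Roughly what viper collects from --kubernetes-uyuni-namespace
	// and the --kubernetes-certmanager-* flags.
	parsed := map[string]interface{}{
		"uyuni": map[string]interface{}{"namespace": "uyunins"},
		"certmanager": map[string]interface{}{
			"namespace": "cert-manager",
			"chart":     "oci://srv/certmanager",
		},
	}

	var flags KubernetesFlags
	if err := mapstructure.Decode(parsed, &flags); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", flags)
}
```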
--- .../cmd/install/kubernetes/kubernetes_test.go | 4 +-- .../cmd/migrate/kubernetes/kubernetes_test.go | 4 +-- mgradm/cmd/migrate/kubernetes/utils.go | 4 +-- .../cmd/upgrade/kubernetes/kubernetes_test.go | 4 +-- mgradm/cmd/upgrade/kubernetes/utils.go | 6 ++-- mgradm/shared/kubernetes/certificates.go | 10 +++---- mgradm/shared/kubernetes/flags.go | 2 +- mgradm/shared/kubernetes/reconcile.go | 16 +++++----- mgradm/shared/utils/cmd_utils.go | 30 ++++++++----------- mgradm/shared/utils/types.go | 4 +-- shared/connection.go | 2 +- shared/testutils/flagstests/mgradm.go | 24 ++++++--------- 12 files changed, 50 insertions(+), 60 deletions(-) diff --git a/mgradm/cmd/install/kubernetes/kubernetes_test.go b/mgradm/cmd/install/kubernetes/kubernetes_test.go index 097d5fa2b..003035381 100644 --- a/mgradm/cmd/install/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/install/kubernetes/kubernetes_test.go @@ -18,7 +18,7 @@ import ( func TestParamsParsing(t *testing.T) { args := flagstests.InstallFlagsTestArgs() - args = append(args, flagstests.ServerHelmFlagsTestArgs...) + args = append(args, flagstests.ServerKubernetesFlagsTestArgs...) args = append(args, flagstests.VolumesFlagsTestExpected...) args = append(args, "srv.fq.dn") @@ -27,7 +27,7 @@ func TestParamsParsing(t *testing.T) { _ *cobra.Command, args []string, ) error { flagstests.AssertInstallFlags(t, &flags.ServerFlags) - flagstests.AssertServerHelmFlags(t, &flags.Helm) + flagstests.AssertServerKubernetesFlags(t, &flags.Kubernetes) flagstests.AssertVolumesFlags(t, &flags.Volumes) testutils.AssertEquals(t, "Wrong FQDN", "srv.fq.dn", args[0]) return nil diff --git a/mgradm/cmd/migrate/kubernetes/kubernetes_test.go b/mgradm/cmd/migrate/kubernetes/kubernetes_test.go index 4e4bc44ba..da5ecc1c2 100644 --- a/mgradm/cmd/migrate/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/migrate/kubernetes/kubernetes_test.go @@ -34,7 +34,7 @@ func TestParamsParsing(t *testing.T) { args = append(args, flagstests.DBUpdateImageFlagTestArgs...) args = append(args, flagstests.CocoFlagsTestArgs...) args = append(args, flagstests.HubXmlrpcFlagsTestArgs...) - args = append(args, flagstests.ServerHelmFlagsTestArgs...) + args = append(args, flagstests.ServerKubernetesFlagsTestArgs...) args = append(args, flagstests.VolumesFlagsTestExpected...) 
// Test function asserting that the args are properly parsed @@ -49,7 +49,7 @@ func TestParamsParsing(t *testing.T) { flagstests.AssertCocoFlag(t, &flags.Coco) flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) testutils.AssertEquals(t, "Error parsing --user", "sudoer", flags.Migration.User) - flagstests.AssertServerHelmFlags(t, &flags.Helm) + flagstests.AssertServerKubernetesFlags(t, &flags.Kubernetes) flagstests.AssertVolumesFlags(t, &flags.Volumes) testutils.AssertEquals(t, "Error parsing --ssl-password", "sslsecret", flags.Installation.SSL.Password) testutils.AssertEquals(t, "Error parsing --ssh-key-public", "path/ssh.pub", flags.SSH.Key.Public) diff --git a/mgradm/cmd/migrate/kubernetes/utils.go b/mgradm/cmd/migrate/kubernetes/utils.go index b2b1ae22e..2e0bdc48c 100644 --- a/mgradm/cmd/migrate/kubernetes/utils.go +++ b/mgradm/cmd/migrate/kubernetes/utils.go @@ -26,7 +26,7 @@ func migrateToKubernetes( _ *cobra.Command, args []string, ) error { - namespace := flags.Helm.Uyuni.Namespace + namespace := flags.Kubernetes.Uyuni.Namespace // Create the namespace if not present if err := kubernetes.CreateNamespace(namespace); err != nil { @@ -61,7 +61,7 @@ func migrateToKubernetes( // Create a secret using SCC credentials if any are provided pullSecret, err := shared_kubernetes.GetSCCSecret( - flags.Helm.Uyuni.Namespace, &flags.Installation.SCC, shared_kubernetes.ServerApp, + flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SCC, shared_kubernetes.ServerApp, ) if err != nil { return err diff --git a/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go b/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go index e747c8861..c1c8844c9 100644 --- a/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go +++ b/mgradm/cmd/upgrade/kubernetes/kubernetes_test.go @@ -24,7 +24,7 @@ func TestParamsParsing(t *testing.T) { args = append(args, flagstests.CocoFlagsTestArgs...) args = append(args, flagstests.HubXmlrpcFlagsTestArgs...) args = append(args, flagstests.SCCFlagTestArgs...) - args = append(args, flagstests.ServerHelmFlagsTestArgs...) + args = append(args, flagstests.ServerKubernetesFlagsTestArgs...) // Test function asserting that the args are properly parsed tester := func(_ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, @@ -35,7 +35,7 @@ func TestParamsParsing(t *testing.T) { flagstests.AssertCocoFlag(t, &flags.Coco) flagstests.AssertHubXmlrpcFlag(t, &flags.HubXmlrpc) // TODO Assert SCC flags - flagstests.AssertServerHelmFlags(t, &flags.Helm) + flagstests.AssertServerKubernetesFlags(t, &flags.Kubernetes) return nil } diff --git a/mgradm/cmd/upgrade/kubernetes/utils.go b/mgradm/cmd/upgrade/kubernetes/utils.go index 2fa1986f9..96d89779b 100644 --- a/mgradm/cmd/upgrade/kubernetes/utils.go +++ b/mgradm/cmd/upgrade/kubernetes/utils.go @@ -13,10 +13,10 @@ import ( ) func upgradeKubernetes( - globalFlags *types.GlobalFlags, + _ *types.GlobalFlags, flags *kubernetes.KubernetesServerFlags, - cmd *cobra.Command, - args []string, + _ *cobra.Command, + _ []string, ) error { return kubernetes.Reconcile(flags, "") } diff --git a/mgradm/shared/kubernetes/certificates.go b/mgradm/shared/kubernetes/certificates.go index 369c3fda0..dedec1e3b 100644 --- a/mgradm/shared/kubernetes/certificates.go +++ b/mgradm/shared/kubernetes/certificates.go @@ -155,15 +155,15 @@ func waitForIssuer(namespace string, name string) error { } // InstallCertManager deploys the cert-manager helm chart with the CRDs. 
-func InstallCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, imagePullPolicy string) error { +func InstallCertManager(kubernetesFlags *cmd_utils.KubernetesFlags, kubeconfig string, imagePullPolicy string) error { if ready, err := kubernetes.IsDeploymentReady("", "cert-manager"); err != nil { return err } else if !ready { log.Info().Msg(L("Installing cert-manager")) repo := "" - chart := helmFlags.CertManager.Chart - version := helmFlags.CertManager.Version - namespace := helmFlags.CertManager.Namespace + chart := kubernetesFlags.CertManager.Chart + version := kubernetesFlags.CertManager.Version + namespace := kubernetesFlags.CertManager.Namespace args := []string{ "--set", "crds.enabled=true", @@ -171,7 +171,7 @@ func InstallCertManager(helmFlags *cmd_utils.HelmFlags, kubeconfig string, image "--set-json", "global.commonLabels={\"installedby\": \"mgradm\"}", "--set", "image.pullPolicy=" + string(kubernetes.GetPullPolicy(imagePullPolicy)), } - extraValues := helmFlags.CertManager.Values + extraValues := kubernetesFlags.CertManager.Values if extraValues != "" { args = append(args, "-f", extraValues) } diff --git a/mgradm/shared/kubernetes/flags.go b/mgradm/shared/kubernetes/flags.go index c73a76483..5075def85 100644 --- a/mgradm/shared/kubernetes/flags.go +++ b/mgradm/shared/kubernetes/flags.go @@ -11,7 +11,7 @@ import "github.com/uyuni-project/uyuni-tools/mgradm/shared/utils" // KubernetesServerFlags is the aggregation of all flags for install, upgrade and migrate. type KubernetesServerFlags struct { utils.ServerFlags `mapstructure:",squash"` - Helm utils.HelmFlags + Kubernetes utils.KubernetesFlags Volumes utils.VolumesFlags // SSH defines the SSH configuration to use to connect to the source server to migrate. SSH utils.SSHFlags diff --git a/mgradm/shared/kubernetes/reconcile.go b/mgradm/shared/kubernetes/reconcile.go index 35ed69836..eb908424f 100644 --- a/mgradm/shared/kubernetes/reconcile.go +++ b/mgradm/shared/kubernetes/reconcile.go @@ -29,7 +29,7 @@ func Reconcile(flags *KubernetesServerFlags, fqdn string) error { return errors.New(L("install kubectl before running this command")) } - namespace := flags.Helm.Uyuni.Namespace + namespace := flags.Kubernetes.Uyuni.Namespace // Create the namespace if not present if err := CreateNamespace(namespace); err != nil { return err @@ -43,7 +43,9 @@ func Reconcile(flags *KubernetesServerFlags, fqdn string) error { cnx := shared.NewConnection("kubectl", "", kubernetes.ServerFilter) // Create a secret using SCC credentials if any are provided - pullSecret, err := kubernetes.GetSCCSecret(flags.Helm.Uyuni.Namespace, &flags.Installation.SCC, kubernetes.ServerApp) + pullSecret, err := kubernetes.GetSCCSecret( + flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SCC, kubernetes.ServerApp, + ) if err != nil { return err } @@ -192,7 +194,7 @@ func Reconcile(flags *KubernetesServerFlags, fqdn string) error { // Deploy the SSL CA and server certificates var caIssuer string if flags.Installation.SSL.UseExisting() { - if err := DeployExistingCertificate(flags.Helm.Uyuni.Namespace, &flags.Installation.SSL); err != nil { + if err := DeployExistingCertificate(flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SSL); err != nil { return err } } else if !HasIssuer(namespace, kubernetes.CaIssuerName) { @@ -200,7 +202,7 @@ func Reconcile(flags *KubernetesServerFlags, fqdn string) error { // Note that in an operator we won't be able to install cert-manager and just wait for it to be installed. 
kubeconfig := clusterInfos.GetKubeconfig() - if err := InstallCertManager(&flags.Helm, kubeconfig, flags.Image.PullPolicy); err != nil { + if err := InstallCertManager(&flags.Kubernetes, kubeconfig, flags.Image.PullPolicy); err != nil { return utils.Errorf(err, L("cannot install cert manager")) } @@ -223,18 +225,18 @@ func Reconcile(flags *KubernetesServerFlags, fqdn string) error { return err } } else { - if err := DeployGeneratedCa(flags.Helm.Uyuni.Namespace, &flags.Installation.SSL, fqdn); err != nil { + if err := DeployGeneratedCa(flags.Kubernetes.Uyuni.Namespace, &flags.Installation.SSL, fqdn); err != nil { return err } } // Wait for issuer to be ready - if err := waitForIssuer(flags.Helm.Uyuni.Namespace, kubernetes.CaIssuerName); err != nil { + if err := waitForIssuer(flags.Kubernetes.Uyuni.Namespace, kubernetes.CaIssuerName); err != nil { return err } // Extract the CA cert into uyuni-ca config map as the container shouldn't have the CA secret - if err := extractCaCertToConfig(flags.Helm.Uyuni.Namespace); err != nil { + if err := extractCaCertToConfig(flags.Kubernetes.Uyuni.Namespace); err != nil { return err } caIssuer = kubernetes.CaIssuerName diff --git a/mgradm/shared/utils/cmd_utils.go b/mgradm/shared/utils/cmd_utils.go index b978c96b4..eeb806c9d 100644 --- a/mgradm/shared/utils/cmd_utils.go +++ b/mgradm/shared/utils/cmd_utils.go @@ -35,30 +35,24 @@ func (f *InstallSSLFlags) CheckParameters() { // AddHelmInstallFlag add Helm install flags to a command. func AddHelmInstallFlag(cmd *cobra.Command) { - defaultChart := fmt.Sprintf("oci://%s/server-helm", utils.DefaultHelmRegistry) - - cmd.Flags().String("helm-uyuni-namespace", "default", L("Kubernetes namespace where to install uyuni")) - cmd.Flags().String("helm-uyuni-chart", defaultChart, L("URL to the uyuni helm chart")) - cmd.Flags().String("helm-uyuni-version", "", L("Version of the uyuni helm chart")) - cmd.Flags().String("helm-uyuni-values", "", L("Path to a values YAML file to use for Uyuni helm install")) - cmd.Flags().String("helm-certmanager-namespace", "cert-manager", + cmd.Flags().String("kubernetes-uyuni-namespace", "default", L("Kubernetes namespace where to install uyuni")) + cmd.Flags().String("kubernetes-certmanager-namespace", "cert-manager", L("Kubernetes namespace where to install cert-manager"), ) - cmd.Flags().String("helm-certmanager-chart", "", + cmd.Flags().String("kubernetes-certmanager-chart", "", L("URL to the cert-manager helm chart. 
To be used for offline installations"),
 	)
-	cmd.Flags().String("helm-certmanager-version", "", L("Version of the cert-manager helm chart"))
-	cmd.Flags().String("helm-certmanager-values", "", L("Path to a values YAML file to use for cert-manager helm install"))
+	cmd.Flags().String("kubernetes-certmanager-version", "", L("Version of the cert-manager helm chart"))
+	cmd.Flags().String("kubernetes-certmanager-values", "",
+		L("Path to a values YAML file to use for cert-manager helm install"),
+	)
 
 	_ = utils.AddFlagHelpGroup(cmd, &utils.Group{ID: "helm", Title: L("Helm Chart Flags")})
-	_ = utils.AddFlagToHelpGroupID(cmd, "helm-uyuni-namespace", "helm")
-	_ = utils.AddFlagToHelpGroupID(cmd, "helm-uyuni-chart", "helm")
-	_ = utils.AddFlagToHelpGroupID(cmd, "helm-uyuni-version", "helm")
-	_ = utils.AddFlagToHelpGroupID(cmd, "helm-uyuni-values", "helm")
-	_ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-namespace", "helm")
-	_ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-chart", "helm")
-	_ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-version", "helm")
-	_ = utils.AddFlagToHelpGroupID(cmd, "helm-certmanager-values", "helm")
+	_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-uyuni-namespace", "helm")
+	_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-namespace", "helm")
+	_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-chart", "helm")
+	_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-version", "helm")
+	_ = utils.AddFlagToHelpGroupID(cmd, "kubernetes-certmanager-values", "helm")
 }
 
 const volumesFlagsGroupID = "volumes"
diff --git a/mgradm/shared/utils/types.go b/mgradm/shared/utils/types.go
index e8fdbc5f8..77abb1cd2 100644
--- a/mgradm/shared/utils/types.go
+++ b/mgradm/shared/utils/types.go
@@ -15,8 +15,8 @@ type InstallSSLFlags struct {
 	Server types.SSLPair
 }
 
-// HelmFlags stores Uyuni and Cert Manager Helm information.
-type HelmFlags struct {
+// KubernetesFlags stores Uyuni and Cert Manager Kubernetes-specific parameters.
+type KubernetesFlags struct {
 	Uyuni       types.ChartFlags
 	CertManager types.ChartFlags
 }
diff --git a/shared/connection.go b/shared/connection.go
index 9beb6ea67..c55939812 100644
--- a/shared/connection.go
+++ b/shared/connection.go
@@ -123,7 +123,6 @@ func (c *Connection) GetCommand() (string, error) {
 // GetNamespace finds the namespace of the running pod
 // appName is the name of the application to look for, if not provided it will be guessed based on the filter.
-// filters are additional filters to use to find the pod.
-func (c *Connection) GetNamespace(appName string, filters ...string) (string, error) {
+func (c *Connection) GetNamespace(appName string) (string, error) {
 	// skip if namespace is already set
 	if c.namespace != "" {
 		return c.namespace, nil
diff --git a/shared/testutils/flagstests/mgradm.go b/shared/testutils/flagstests/mgradm.go
index 416ae85e3..0d0a741ea 100644
--- a/shared/testutils/flagstests/mgradm.go
+++ b/shared/testutils/flagstests/mgradm.go
@@ -12,24 +12,18 @@ import (
 	"github.com/uyuni-project/uyuni-tools/shared/types"
 )
 
-// ServerHelmFlagsTestArgs is the expected values for AssertHelmInstallFlags.
-var ServerHelmFlagsTestArgs = []string{
-	"--helm-uyuni-namespace", "uyunins",
-	"--helm-uyuni-chart", "oci://srv/uyuni",
-	"--helm-uyuni-version", "1.2.3",
-	"--helm-uyuni-values", "uyuni/values.yaml",
-	"--helm-certmanager-namespace", "certmanagerns",
-	"--helm-certmanager-chart", "oci://srv/certmanager",
-	"--helm-certmanager-version", "4.5.6",
-	"--helm-certmanager-values", "certmanager/values.yaml",
+// ServerKubernetesFlagsTestArgs are the expected values for AssertServerKubernetesFlags.
+var ServerKubernetesFlagsTestArgs = []string{
+	"--kubernetes-uyuni-namespace", "uyunins",
+	"--kubernetes-certmanager-namespace", "certmanagerns",
+	"--kubernetes-certmanager-chart", "oci://srv/certmanager",
+	"--kubernetes-certmanager-version", "4.5.6",
+	"--kubernetes-certmanager-values", "certmanager/values.yaml",
 }
 
-// AssertServerHelmFlags checks that all Helm flags are parsed correctly.
-func AssertServerHelmFlags(t *testing.T, flags *utils.HelmFlags) {
-	testutils.AssertEquals(t, "Error parsing --helm-uyuni-namespace", "uyunins", flags.Uyuni.Namespace)
-	testutils.AssertEquals(t, "Error parsing --helm-uyuni-chart", "oci://srv/uyuni", flags.Uyuni.Chart)
-	testutils.AssertEquals(t, "Error parsing --helm-uyuni-version", "1.2.3", flags.Uyuni.Version)
-	testutils.AssertEquals(t, "Error parsing --helm-uyuni-values", "uyuni/values.yaml", flags.Uyuni.Values)
-	testutils.AssertEquals(t, "Error parsing --helm-certmanager-namespace", "certmanagerns", flags.CertManager.Namespace,
+// AssertServerKubernetesFlags checks that all Kubernetes flags are parsed correctly.
+func AssertServerKubernetesFlags(t *testing.T, flags *utils.KubernetesFlags) {
+	testutils.AssertEquals(t, "Error parsing --kubernetes-uyuni-namespace", "uyunins", flags.Uyuni.Namespace)
+	testutils.AssertEquals(t, "Error parsing --kubernetes-certmanager-namespace", "certmanagerns", flags.CertManager.Namespace,
 	)

From dbe6f6f4f00512da7cef54a28180ad5a5509f7bf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Thu, 14 Nov 2024 15:07:05 +0100
Subject: [PATCH 16/19] Disable CGO build for Debian i586 to avoid cross-compiling

With CGO enabled there are include problems on that architecture, and fixing
them would probably require cross-compiling.
---
 uyuni-tools.spec | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/uyuni-tools.spec b/uyuni-tools.spec
index 0a07f5d67..3d4b1bcfa 100644
--- a/uyuni-tools.spec
+++ b/uyuni-tools.spec
@@ -291,6 +291,13 @@ Fish command line completion support for %{name_ctl}.
 tar -zxf %{SOURCE1}
 
 %build
+%ifarch i386
+%if 0%{?debian}
+# Disable CGO build for debian 32 bits to avoid cross-compilation
+export CGO_ENABLED=0
+%endif
+%endif
+
 export GOFLAGS=-mod=vendor
 mkdir -p bin
 UTILS_PATH="%{provider_prefix}/shared/utils"

From a4b2844db5083849b0e0915e4e6cc0f95ce77b62 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Thu, 14 Nov 2024 16:51:32 +0100
Subject: [PATCH 17/19] Handle traefik helm chart breaking change to expose ports

The Traefik helm chart changed the structure of the expose property starting
with version 27. Read the chart version from the traefik.yaml file and write
the config accordingly.
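A self-contained sketch of the version detection described above: extract the chart's major version from the K3s traefik.yaml manifest and choose the expose syntax accordingly. The sample manifest line follows the usual K3s chart reference layout but is an assumption here:

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// traefikChartMajorVersion extracts N from the first "traefik-N..." occurrence.
func traefikChartMajorVersion(manifest string) (int, error) {
	matches := regexp.MustCompile(`traefik-([0-9]+)`).FindStringSubmatch(manifest)
	if len(matches) != 2 {
		return 0, fmt.Errorf("traefik configuration file doesn't contain the helm chart version")
	}
	return strconv.Atoi(matches[1])
}

func main() {
	// Assumed shape of the chart reference found in
	// /var/lib/rancher/k3s/server/manifests/traefik.yaml
	sample := "chart: https://%{KUBERNETES_API}%/static/charts/traefik-27.0.201.tgz"

	version, err := traefikChartMajorVersion(sample)
	if err != nil {
		panic(err)
	}
	// Before chart v27, `expose` is a plain boolean; from v27 on it is a map.
	fmt.Println("use boolean expose:", version < 27) // prints: use boolean expose: false
}
```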
---
 shared/kubernetes/k3s.go                | 32 ++++++++++++++++++++++++-
 shared/kubernetes/k3sTraefikTemplate.go | 13 +++++++---
 2 files changed, 41 insertions(+), 4 deletions(-)

diff --git a/shared/kubernetes/k3s.go b/shared/kubernetes/k3s.go
index 53b1d53a1..c1ff3a480 100644
--- a/shared/kubernetes/k3s.go
+++ b/shared/kubernetes/k3s.go
@@ -8,6 +8,8 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"regexp"
+	"strconv"
 	"time"
 
 	"github.com/rs/zerolog"
@@ -18,6 +20,7 @@ import (
 )
 
 const k3sTraefikConfigPath = "/var/lib/rancher/k3s/server/manifests/uyuni-traefik-config.yaml"
+const k3sTraefikMainConfigPath = "/var/lib/rancher/k3s/server/manifests/traefik.yaml"
 
 // InstallK3sTraefikConfig install K3s Traefik configuration.
 func InstallK3sTraefikConfig(ports []types.PortMap) error {
@@ -28,8 +31,14 @@ func InstallK3sTraefikConfig(ports []types.PortMap) error {
 		port.Name = GetTraefikEndpointName(port)
 		endpoints = append(endpoints, port)
 	}
+	version, err := getTraefikChartMajorVersion()
+	if err != nil {
+		return err
+	}
+
 	data := K3sTraefikConfigTemplateData{
-		Ports: endpoints,
+		Ports:         endpoints,
+		ExposeBoolean: version < 27,
 	}
 	if err := utils.WriteTemplateToFile(data, k3sTraefikConfigPath, 0600, true); err != nil {
 		return utils.Errorf(err, L("Failed to write Traefik configuration"))
@@ -103,3 +112,24 @@ func UninstallK3sTraefikConfig(dryRun bool) {
 	// Now that it's reinstalled, remove the file
 	utils.UninstallFile(k3sTraefikConfigPath, dryRun)
 }
+
+func getTraefikChartMajorVersion() (int, error) {
+	out, err := os.ReadFile(k3sTraefikMainConfigPath)
+	if err != nil {
+		return 0, utils.Errorf(err, L("failed to read the traefik configuration"))
+	}
+	matches := regexp.MustCompile(`traefik-([0-9]+)`).FindStringSubmatch(string(out))
+	if matches == nil {
+		return 0, errors.New(L("traefik configuration file doesn't contain the helm chart version"))
+	}
+	if len(matches) != 2 {
+		return 0, errors.New(L("failed to find traefik helm chart version"))
+	}
+
+	majorVersion, err := strconv.Atoi(matches[1])
+	if err != nil {
+		return 0, utils.Errorf(err, L("failed to parse the traefik helm chart version"))
+	}
+
+	return majorVersion, nil
+}
diff --git a/shared/kubernetes/k3sTraefikTemplate.go b/shared/kubernetes/k3sTraefikTemplate.go
index 931a9960e..4528d20d8 100644
--- a/shared/kubernetes/k3sTraefikTemplate.go
+++ b/shared/kubernetes/k3sTraefikTemplate.go
@@ -22,19 +22,26 @@ spec:
     {{- range .Ports }}
     {{ .Name }}:
       port: {{ .Port }}
+      {{- if $.ExposeBoolean }}
       expose: true
+      {{- else }}
+      expose:
+        default: true
+      {{- end }}
       exposedPort: {{ .Exposed }}
-    {{- if eq .Protocol "udp" }}
+      {{- if eq .Protocol "udp" }}
       protocol: UDP
-    {{- else }}
+      {{- else }}
       protocol: TCP
-    {{- end }}
+      {{- end }}
     {{- end }}
 `
 
 // K3sTraefikConfigTemplateData represents information used to create K3s Traefik helm chart.
 type K3sTraefikConfigTemplateData struct {
 	Ports []types.PortMap
+	// Set to true before traefik chart v27
+	ExposeBoolean bool
 }
 
-// Render will create the helm chart configuation for K3sTraefik.
+// Render will create the helm chart configuration for K3sTraefik.

From 6e0ddedd982c763207c4bbaf2e95bc31b47e2756 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?=
Date: Fri, 15 Nov 2024 11:14:03 +0100
Subject: [PATCH 18/19] Run the first user creation from the setup script

Running the first user creation from outside the container relies on the pod
being seen as ready by Kubernetes... and sometimes that takes longer than
expected.

Calling the API from the setup script inside the container allows using
localhost and doesn't rely on the ingress to route the request.
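The patch does this with curl inside the generated setup script (see the template diff below); the control flow is roughly the following, sketched here in Go with placeholder credentials. The endpoints come from the patch; everything else is illustrative:

```go
package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// The server certificate may be self-signed at setup time, hence the
	// equivalent of the script's `curl -k`.
	client := &http.Client{Transport: &http.Transport{
		TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
	}}

	// Probe the first-user page: 200 means no user exists yet, 403 means one does.
	resp, err := client.Get("https://localhost/rhn/newlogin/CreateFirstUser.do")
	if err != nil {
		panic(err)
	}
	resp.Body.Close()

	switch resp.StatusCode {
	case http.StatusOK:
		// Placeholder values: the real script injects the template data.
		form := url.Values{
			"orgName":       {"Organization"},
			"adminLogin":    {"admin"},
			"adminPassword": {"secret"},
			"firstName":     {"Admin"},
			"lastName":      {"User"},
			"email":         {"admin@example.com"},
		}
		r, err := client.PostForm("https://localhost/rhn/manager/api/org/createFirst", form)
		if err != nil {
			panic(err)
		}
		r.Body.Close()
		fmt.Println("first user created:", r.Status)
	case http.StatusForbidden:
		fmt.Println("administration user already exists, reusing")
	}
}
```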
--- .../templates/mgrSetupScriptTemplate.go | 39 ++++++++++- mgradm/shared/utils/setup.go | 65 ++++--------------- 2 files changed, 50 insertions(+), 54 deletions(-) diff --git a/mgradm/shared/templates/mgrSetupScriptTemplate.go b/mgradm/shared/templates/mgrSetupScriptTemplate.go index 3cd70ada8..1ad61c7eb 100644 --- a/mgradm/shared/templates/mgrSetupScriptTemplate.go +++ b/mgradm/shared/templates/mgrSetupScriptTemplate.go @@ -27,6 +27,34 @@ RESULT=$? # The CA needs to be added to the database for Kickstart use. /usr/bin/rhn-ssl-dbstore --ca-cert=/etc/pki/trust/anchors/LOCAL-RHN-ORG-TRUSTED-SSL-CERT +if test -n "{{ .AdminPassword }}"; then + {{ if .NoSSL }} + CURL_SCHEME="http" + {{ else }} + CURL_SCHEME="-k https" + {{ end }} + + HTTP_CODE=$(curl -o /dev/null -s -w %{http_code} $CURL_SCHEME://localhost/rhn/newlogin/CreateFirstUser.do) + if test "$HTTP_CODE" == "200"; then + echo "Creating administration user" + curl -s -o /tmp/curl_out \ + -d "orgName={{ .OrgName }}" \ + -d "adminLogin={{ .AdminLogin }}" \ + -d "adminPassword={{ .AdminPassword }}" \ + -d "firstName={{ .AdminFirstName }}" \ + -d "lastName={{ .AdminLastName }}" \ + -d "email={{ .AdminEmail }}" \ + $CURL_SCHEME://localhost/rhn/manager/api/org/createFirst + if ! grep -q '^{"success":true' /tmp/curl_out ; then + echo "Failed to create the administration user" + cat /tmp/curl_out + fi + rm -f /tmp/curl_out + elif test "$HTTP_CODE" == "403"; then + echo "Administration user already exists, reusing" + fi +fi + # clean before leaving rm $0 exit $RESULT @@ -34,8 +62,15 @@ exit $RESULT // MgrSetupScriptTemplateData represents information used to create setup script. type MgrSetupScriptTemplateData struct { - Env map[string]string - DebugJava bool + Env map[string]string + NoSSL bool + DebugJava bool + AdminPassword string + AdminLogin string + AdminFirstName string + AdminLastName string + AdminEmail string + OrgName string } // Render will create setup script. diff --git a/mgradm/shared/utils/setup.go b/mgradm/shared/utils/setup.go index a7fb5a534..85cfba3e0 100644 --- a/mgradm/shared/utils/setup.go +++ b/mgradm/shared/utils/setup.go @@ -5,17 +5,14 @@ package utils import ( - "errors" - "net/url" "path/filepath" "strconv" + "strings" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/uyuni-project/uyuni-tools/mgradm/shared/templates" "github.com/uyuni-project/uyuni-tools/shared" - "github.com/uyuni-project/uyuni-tools/shared/api" - "github.com/uyuni-project/uyuni-tools/shared/api/org" . 
"github.com/uyuni-project/uyuni-tools/shared/l10n" "github.com/uyuni-project/uyuni-tools/shared/utils" ) @@ -51,52 +48,7 @@ func RunSetup(cnx *shared.Connection, flags *ServerFlags, fqdn string, env map[s return utils.Errorf(err, L("failed to add SSL CA certificate to host trusted certificates")) } - installFlags := &flags.Installation - - // Call the org.createFirst api if flags are passed - // This should not happen since the password is queried and enforced - if installFlags.Admin.Password != "" { - apiCnx := api.ConnectionDetails{ - Server: fqdn, - Insecure: false, - User: installFlags.Admin.Login, - Password: installFlags.Admin.Password, - } - - // Check if there is already admin user with given password and organization with same name - client, err := api.Init(&apiCnx) - if err != nil { - log.Error().Err(err).Msgf(L("unable to prepare API client")) - } - if err = client.Login(); err == nil { - if _, err := org.GetOrganizationDetails(&apiCnx, installFlags.Organization); err == nil { - log.Info().Msgf(L("Server organization already exists, reusing")) - } else { - log.Debug().Err(err).Msg("Error returned by server") - log.Warn().Msgf( - L("Administration user already exists, but organization %s could not be found"), - installFlags.Organization, - ) - } - } else { - var connError *url.Error - if errors.As(err, &connError) { - // We were not able to connect to the server at all - return err - } - // We do not have any user existing, create one. CreateFirst skip user login - _, err := org.CreateFirst(&apiCnx, installFlags.Organization, &installFlags.Admin) - if err != nil { - if preconfigured { - log.Warn().Msgf(L("Administration user already exists, but provided credentials are not valid")) - } else { - return err - } - } - } - } - - log.Info().Msgf(L("Server set up, login on https://%[1]s with %[2]s user"), fqdn, installFlags.Admin.Login) + log.Info().Msgf(L("Server set up, login on https://%[1]s with %[2]s user"), fqdn, flags.Installation.Admin.Login) return nil } @@ -166,9 +118,18 @@ func generateSetupScript( return "", nil, err } + _, noSSL := env["NO_SSL"] + dataTemplate := templates.MgrSetupScriptTemplateData{ - Env: env, - DebugJava: flags.Debug.Java, + Env: env, + DebugJava: flags.Debug.Java, + OrgName: flags.Organization, + AdminLogin: flags.Admin.Login, + AdminPassword: strings.ReplaceAll(flags.Admin.Password, `"`, `\"`), + AdminFirstName: flags.Admin.FirstName, + AdminLastName: flags.Admin.LastName, + AdminEmail: flags.Admin.Email, + NoSSL: noSSL, } scriptPath := filepath.Join(scriptDir, setupName) From cd4d3cfe891bc425d39184bb7bbca11314d04767 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?C=C3=A9dric=20Bosdonnat?= Date: Fri, 15 Nov 2024 11:16:23 +0100 Subject: [PATCH 19/19] Remove the line end in the local timezone During the installation, there was a message indicating that the timezone from the host couldn't be set in the container. This was due to no removing the line end from the command output. --- shared/utils/utils.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/shared/utils/utils.go b/shared/utils/utils.go index 0f2957839..131286f07 100644 --- a/shared/utils/utils.go +++ b/shared/utils/utils.go @@ -258,7 +258,7 @@ func GetLocalTimezone() string { if err != nil { log.Fatal().Err(err).Msgf(L("Failed to run %s"), "timedatectl show --value -p Timezone") } - return string(out) + return strings.TrimSpace(string(out)) } // IsEmptyDirectory return true if a given directory is empty.