From 863de56b57a302e957916cec7516273344559bd0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nils=20Gustav=20Str=C3=A5b=C3=B8?= Date: Wed, 16 Aug 2023 15:13:38 +0200 Subject: [PATCH] fix stop/start bug with HPA enabled components --- charts/radix-operator/Chart.yaml | 4 +- pkg/apis/deployment/deployment_test.go | 105 +++++++++++-------------- pkg/apis/deployment/kubedeployment.go | 9 ++- 3 files changed, 54 insertions(+), 64 deletions(-) diff --git a/charts/radix-operator/Chart.yaml b/charts/radix-operator/Chart.yaml index 554d7e02c..d508326db 100644 --- a/charts/radix-operator/Chart.yaml +++ b/charts/radix-operator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: radix-operator -version: 1.20.3 -appVersion: 1.40.3 +version: 1.20.4 +appVersion: 1.40.4 kubeVersion: ">=1.24.0" description: Radix Operator keywords: diff --git a/pkg/apis/deployment/deployment_test.go b/pkg/apis/deployment/deployment_test.go index df5f562fc..d4daf2a38 100644 --- a/pkg/apis/deployment/deployment_test.go +++ b/pkg/apis/deployment/deployment_test.go @@ -1603,6 +1603,52 @@ func TestObjectSynced_DeploymentReplicasFromCurrentDeploymentWhenHPAEnabled(t *t assert.Equal(t, int32(1), *comp1.Spec.Replicas) } +func TestObjectSynced_StopAndStartDeploymentWhenHPAEnabled(t *testing.T) { + tu, client, kubeUtil, radixclient, prometheusclient, _ := setupTest() + defer teardownTest() + envNamespace := utils.GetEnvironmentNamespace("anyapp", "test") + + // Initial sync creating deployments should use replicas from spec + _, err := applyDeploymentWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment(). + WithDeploymentName("deployment1"). + WithAppName("anyapp"). + WithEnvironment("test"). + WithComponents( + utils.NewDeployComponentBuilder().WithName("comp1").WithReplicas(pointers.Ptr(1)).WithHorizontalScaling(pointers.Ptr(int32(2)), int32(4), nil, nil), + )) + require.NoError(t, err) + + comp1, _ := client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp1", metav1.GetOptions{}) + assert.Equal(t, int32(2), *comp1.Spec.Replicas) + + // Resync existing RD with replicas 0 (stop) should set deployment replicas to 0 + err = applyDeploymentUpdateWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment(). + WithDeploymentName("deployment1"). + WithAppName("anyapp"). + WithEnvironment("test"). + WithComponents( + utils.NewDeployComponentBuilder().WithName("comp1").WithReplicas(pointers.Ptr(0)).WithHorizontalScaling(pointers.Ptr(int32(1)), int32(4), nil, nil), + )) + require.NoError(t, err) + + comp1, _ = client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp1", metav1.GetOptions{}) + assert.Equal(t, int32(0), *comp1.Spec.Replicas) + + // Resync existing RD with replicas set back to original value (start) should use replicas from spec + err = applyDeploymentUpdateWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment(). + WithDeploymentName("deployment1"). + WithAppName("anyapp"). + WithEnvironment("test"). 
+ WithComponents( + utils.NewDeployComponentBuilder().WithName("comp1").WithReplicas(pointers.Ptr(1)).WithHorizontalScaling(pointers.Ptr(int32(2)), int32(4), nil, nil), + )) + require.NoError(t, err) + + comp1, _ = client.AppsV1().Deployments(envNamespace).Get(context.TODO(), "comp1", metav1.GetOptions{}) + assert.Equal(t, int32(2), *comp1.Spec.Replicas) + +} + func TestObjectSynced_DeploymentRevisionHistoryLimit(t *testing.T) { tu, client, kubeUtil, radixclient, prometheusclient, _ := setupTest() defer teardownTest() @@ -2575,65 +2621,6 @@ func TestHistoryLimit_IsBroken_FixedAmountOfDeployments(t *testing.T) { teardownTest() } -// func TestObjectUpdated_WithIngressConfig_AnnotationIsPutOnIngresses(t *testing.T) { -// tu, client, kubeUtil, radixclient, prometheusclient, _ := setupTest() -// -// // Setup -// client.CoreV1().ConfigMaps(corev1.NamespaceDefault).Create( -// context.TODO(), -// &corev1.ConfigMap{ -// ObjectMeta: metav1.ObjectMeta{ -// Name: ingressConfigurationMap, -// Namespace: corev1.NamespaceDefault, -// }, -// Data: map[string]string{ -// "ingressConfiguration": testIngressConfiguration, -// }, -// }, -// metav1.CreateOptions{}) -// -// os.Setenv(defaults.ActiveClusternameEnvironmentVariable, clusterName) -// applyDeploymentWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment(). -// WithAppName("any-app"). -// WithEnvironment("dev"). -// WithComponents( -// utils.NewDeployComponentBuilder(). -// WithName("frontend"). -// WithPort("http", 8080). -// WithPublicPort("http"). -// WithDNSAppAlias(true). -// WithIngressConfiguration("non-existing"))) -// -// applyDeploymentWithSync(tu, client, kubeUtil, radixclient, prometheusclient, utils.ARadixDeployment(). -// WithAppName("any-app-2"). -// WithEnvironment("dev"). -// WithComponents( -// utils.NewDeployComponentBuilder(). -// WithName("frontend"). -// WithPort("http", 8080). -// WithPublicPort("http"). -// WithDNSAppAlias(true). 
-//			WithIngressConfiguration("socket")))
-//
-//	// Test
-//	ingresses, _ := client.NetworkingV1().Ingresses(utils.GetEnvironmentNamespace("any-app", "dev")).List(context.TODO(), metav1.ListOptions{})
-//	appAliasIngress := getIngressByName("any-app-url-alias", ingresses)
-//	clusterSpecificIngress := getIngressByName("frontend", ingresses)
-//	activeClusterIngress := getIngressByName("frontend-active-cluster-url-alias", ingresses)
-//	assert.Equal(t, 1, len(appAliasIngress.ObjectMeta.Annotations))
-//	assert.Equal(t, 1, len(clusterSpecificIngress.ObjectMeta.Annotations))
-//	assert.Equal(t, 1, len(activeClusterIngress.ObjectMeta.Annotations))
-//
-//	ingresses, _ = client.NetworkingV1().Ingresses(utils.GetEnvironmentNamespace("any-app-2", "dev")).List(context.TODO(), metav1.ListOptions{})
-//	appAliasIngress = getIngressByName("any-app-2-url-alias", ingresses)
-//	clusterSpecificIngress = getIngressByName("frontend", ingresses)
-//	activeClusterIngress = getIngressByName("frontend-active-cluster-url-alias", ingresses)
-//	assert.Equal(t, 4, len(appAliasIngress.ObjectMeta.Annotations))
-//	assert.Equal(t, 4, len(clusterSpecificIngress.ObjectMeta.Annotations))
-//	assert.Equal(t, 4, len(activeClusterIngress.ObjectMeta.Annotations))
-//
-// }
-
 func TestHPAConfig(t *testing.T) {
 	tu, client, kubeUtil, radixclient, prometheusclient, _ := setupTest()
 	defer teardownTest()
diff --git a/pkg/apis/deployment/kubedeployment.go b/pkg/apis/deployment/kubedeployment.go
index c173e75b0..855770c2a 100644
--- a/pkg/apis/deployment/kubedeployment.go
+++ b/pkg/apis/deployment/kubedeployment.go
@@ -163,9 +163,12 @@ func (deploy *Deployment) getDesiredUpdatedDeploymentConfig(deployComponent v1.R
 	err := deploy.setDesiredDeploymentProperties(deployComponent, desiredDeployment)
 
 	// When HPA is enabled for a component, the HPA controller will scale the Deployment up/down by changing Replicas
-	// We must keep this value
-	if hs := deployComponent.GetHorizontalScaling(); hs != nil {
-		desiredDeployment.Spec.Replicas = currentDeployment.Spec.Replicas
+	// We must keep this value as long as the current replica count is greater than 0.
+	// The current replica count is 0 if the component was previously stopped (replicas explicitly set to 0).
+	if hs := deployComponent.GetHorizontalScaling(); hs != nil && !isComponentStopped(deployComponent) {
+		if replicas := currentDeployment.Spec.Replicas; replicas != nil && *replicas > 0 {
+			desiredDeployment.Spec.Replicas = replicas
+		}
 	}
 
 	return desiredDeployment, err
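
For illustration, the decision made by this last hunk can be sketched in isolation. Everything below (component, int32Ptr, desiredReplicas) is a hypothetical stand-in, not the operator's real API: in the operator, the spec-derived value comes from setDesiredDeploymentProperties and the stop check is isComponentStopped.

package main

import "fmt"

// int32Ptr returns a pointer to n; a stand-in for pointers.Ptr.
func int32Ptr(n int32) *int32 { return &n }

// component is a simplified stand-in for a Radix deploy component; only the
// fields needed to illustrate the replica logic are modelled here.
type component struct {
	replicas   *int32 // replicas requested in the RadixDeployment spec
	hpaEnabled bool   // whether horizontal scaling is configured
}

// isStopped mirrors the idea behind isComponentStopped: a component counts
// as stopped when its spec replicas are explicitly set to 0.
func (c component) isStopped() bool {
	return c.replicas != nil && *c.replicas == 0
}

// desiredReplicas reproduces the fixed decision: with HPA enabled, keep the
// current Deployment's replica count (the HPA controller owns it), but only
// when the component is not stopped and the current count is above zero.
func desiredReplicas(c component, fromSpec, current *int32) *int32 {
	desired := fromSpec
	if c.hpaEnabled && !c.isStopped() && current != nil && *current > 0 {
		desired = current
	}
	return desired
}

func main() {
	running := component{replicas: int32Ptr(1), hpaEnabled: true}
	stopped := component{replicas: int32Ptr(0), hpaEnabled: true}

	// Normal resync: HPA has scaled the Deployment to 3, so keep 3.
	fmt.Println(*desiredReplicas(running, int32Ptr(1), int32Ptr(3))) // 3

	// Stop: replicas 0 wins even though the Deployment currently runs 3.
	fmt.Println(*desiredReplicas(stopped, int32Ptr(0), int32Ptr(3))) // 0

	// Start after stop: the current count is 0, so fall back to the spec value.
	fmt.Println(*desiredReplicas(running, int32Ptr(1), int32Ptr(0))) // 1
}

The third call is the scenario this patch fixes: without the isComponentStopped and replicas > 0 guards, a component started again after a stop inherited the stale 0 replica count from the stopped Deployment and never scaled back up, which is exactly what TestObjectSynced_StopAndStartDeploymentWhenHPAEnabled asserts against.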