Skip to content

Commit

Permalink
generate mca before testing rollout (open-cluster-management-io#657)
Browse files Browse the repository at this point in the history
Signed-off-by: haoqing0110 <[email protected]>
  • Loading branch information
haoqing0110 authored Oct 18, 2024
1 parent bc01437 commit 9181dbb
Showing 1 changed file with 129 additions and 132 deletions.
261 changes: 129 additions & 132 deletions test/integration/addon/addon_manager_template_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,24 +22,28 @@ import (
clusterv1beta1 "open-cluster-management.io/api/cluster/v1beta1"
)

var _ = ginkgo.Describe("Agent deploy", func() {
var clusterNames []string
var err error
var addonName string
var addonTemplateName string
var addonDeployConfigName string
var addonDeployConfigNamespace string
var placementName string
var placementNamespace string
var manifestWorkName string
var _ = ginkgo.Describe("Template deploy", func() {
var (
clusterNames []string
numberOfClusters int
addonName string
addonTemplateName string
addonDeployConfigName string
addonDeployConfigNamespace string
placementName string
placementNamespace string
manifestWorkName string
err error
)

ginkgo.BeforeEach(func() {
suffix := rand.String(5)
numberOfClusters = 5
addonName = fmt.Sprintf("addon-%s", suffix)
addonTemplateName = "hello-template"
addonDeployConfigName = "hello-config"
addonDeployConfigNamespace = "default"
placementName = fmt.Sprintf("ns-%s", suffix)
placementName = fmt.Sprintf("placement-%s", suffix)
placementNamespace = fmt.Sprintf("ns-%s", suffix)
manifestWorkName = fmt.Sprintf("%s-0", constants.DeployWorkNamePrefix(addonName))

Expand All @@ -49,66 +53,30 @@ var _ = ginkgo.Describe("Agent deploy", func() {
decoder := serializer.NewCodecFactory(s).UniversalDeserializer()

// prepare cluster
for i := 0; i < 2; i++ {
for i := 0; i < numberOfClusters; i++ {
managedClusterName := fmt.Sprintf("managedcluster-%s-%d", suffix, i)
clusterNames = append(clusterNames, managedClusterName)
err = createManagedCluster(hubClusterClient, managedClusterName)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
}

// prepare cma
// Create the cma without template first to ensure the mca exists before starting the testing.
// This ensures the code can check the mca in the rollout order.
// If the cma is created with the template directly, the mca will be created in a random order and introduce
// flaky testing results.
cma := &addonapiv1alpha1.ClusterManagementAddOn{
ObjectMeta: metav1.ObjectMeta{
Name: addonName,
},
ObjectMeta: metav1.ObjectMeta{Name: addonName},
Spec: addonapiv1alpha1.ClusterManagementAddOnSpec{
SupportedConfigs: []addonapiv1alpha1.ConfigMeta{
{
ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
Group: utils.AddOnTemplateGVR.Group,
Resource: utils.AddOnTemplateGVR.Resource,
},
},
{
ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
Group: utils.AddOnDeploymentConfigGVR.Group,
Resource: utils.AddOnDeploymentConfigGVR.Resource,
},
},
{ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
Group: utils.AddOnTemplateGVR.Group, Resource: utils.AddOnTemplateGVR.Resource}},
{ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
Group: utils.AddOnDeploymentConfigGVR.Group, Resource: utils.AddOnDeploymentConfigGVR.Resource}},
},
InstallStrategy: addonapiv1alpha1.InstallStrategy{
Type: addonapiv1alpha1.AddonInstallStrategyPlacements,
Placements: []addonapiv1alpha1.PlacementStrategy{
{
PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementName, Namespace: placementNamespace},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{
Type: clusterv1alpha1.Progressive,
Progressive: &clusterv1alpha1.RolloutProgressive{
MaxConcurrency: intstr.FromInt(1),
},
},
Configs: []addonapiv1alpha1.AddOnConfig{
{
ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
Group: utils.AddOnTemplateGVR.Group,
Resource: utils.AddOnTemplateGVR.Resource,
},
ConfigReferent: addonapiv1alpha1.ConfigReferent{
Name: addonTemplateName,
},
},
{
ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
Group: utils.AddOnDeploymentConfigGVR.Group,
Resource: utils.AddOnDeploymentConfigGVR.Resource,
},
ConfigReferent: addonapiv1alpha1.ConfigReferent{
Name: addonDeployConfigName,
Namespace: addonDeployConfigNamespace,
},
},
},
},
{PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementName, Namespace: placementNamespace}},
},
},
},
Expand All @@ -119,16 +87,13 @@ var _ = ginkgo.Describe("Agent deploy", func() {
assertClusterManagementAddOnAnnotations(addonName)

// prepare addon template
var addonTemplate *addonapiv1alpha1.AddOnTemplate
data, err := os.ReadFile("./test/integration/addon/testmanifests/addontemplate.yaml")
addonTemplateData, err := os.ReadFile("./test/integration/addon/testmanifests/addontemplate.yaml")
gomega.Expect(err).ToNot(gomega.HaveOccurred())

addonTemplate = &addonapiv1alpha1.AddOnTemplate{
ObjectMeta: metav1.ObjectMeta{
Name: addonTemplateName,
},
addonTemplate := &addonapiv1alpha1.AddOnTemplate{
ObjectMeta: metav1.ObjectMeta{Name: addonTemplateName},
}
_, _, err = decoder.Decode(data, nil, addonTemplate)
_, _, err = decoder.Decode(addonTemplateData, nil, addonTemplate)
gomega.Expect(err).ToNot(gomega.HaveOccurred())

_, err = hubAddonClient.AddonV1alpha1().AddOnTemplates().Create(context.Background(), addonTemplate, metav1.CreateOptions{})
Expand All @@ -143,28 +108,16 @@ var _ = ginkgo.Describe("Agent deploy", func() {
Spec: addonapiv1alpha1.AddOnDeploymentConfigSpec{
AgentInstallNamespace: "test-install-namespace",
CustomizedVariables: []addonapiv1alpha1.CustomizedVariable{
{
Name: "LOG_LEVEL",
Value: "4",
},
{Name: "LOG_LEVEL", Value: "4"},
},
NodePlacement: &addonapiv1alpha1.NodePlacement{
NodeSelector: map[string]string{
"host": "ssd",
},
NodeSelector: map[string]string{"host": "ssd"},
Tolerations: []corev1.Toleration{
{
Key: "foo",
Operator: corev1.TolerationOpExists,
Effect: corev1.TaintEffectNoExecute,
},
{Key: "foo", Operator: corev1.TolerationOpExists, Effect: corev1.TaintEffectNoExecute},
},
},
Registries: []addonapiv1alpha1.ImageMirror{
{
Source: "quay.io/open-cluster-management",
Mirror: "quay.io/ocm",
},
{Source: "quay.io/open-cluster-management", Mirror: "quay.io/ocm"},
},
},
}
Expand All @@ -183,7 +136,7 @@ var _ = ginkgo.Describe("Agent deploy", func() {
gomega.Expect(err).ToNot(gomega.HaveOccurred())

// prepare placement decisions
err = createPlacementDecision(hubClusterClient, placementNamespace, placementName, "0", clusterNames[0], clusterNames[1])
err = createPlacementDecision(hubClusterClient, placementNamespace, placementName, "0", clusterNames...)
gomega.Expect(err).ToNot(gomega.HaveOccurred())
})

Expand All @@ -193,7 +146,6 @@ var _ = ginkgo.Describe("Agent deploy", func() {
gomega.Expect(err).ToNot(gomega.HaveOccurred())
err = hubClusterClient.ClusterV1().ManagedClusters().Delete(context.Background(), managedClusterName, metav1.DeleteOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())
delete(testAddOnConfigsImpl.registrations, managedClusterName)
}

err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Delete(context.Background(),
Expand All @@ -202,48 +154,82 @@ var _ = ginkgo.Describe("Agent deploy", func() {
})

ginkgo.It("Should deploy agent for addon template", func() {
ginkgo.By("check mca created")
gomega.Eventually(func() error {
for i := 0; i < numberOfClusters; i++ {
_, err := hubAddonClient.AddonV1alpha1().ManagedClusterAddOns(clusterNames[i]).Get(context.Background(), addonName, metav1.GetOptions{})
if err != nil {
return err
}
}
return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

ginkgo.By("check no work rendered")
for i := 0; i < numberOfClusters; i++ {
checkWorkRendered(clusterNames[i], 0)
}

ginkgo.By("update cma")
clusterManagementAddon, err := hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Get(context.Background(), addonName, metav1.GetOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

clusterManagementAddon.Spec.InstallStrategy = addonapiv1alpha1.InstallStrategy{
Type: addonapiv1alpha1.AddonInstallStrategyPlacements,
Placements: []addonapiv1alpha1.PlacementStrategy{{
PlacementRef: addonapiv1alpha1.PlacementRef{Name: placementName, Namespace: placementNamespace},
RolloutStrategy: clusterv1alpha1.RolloutStrategy{
Type: clusterv1alpha1.Progressive,
Progressive: &clusterv1alpha1.RolloutProgressive{MaxConcurrency: intstr.FromInt(1)}},
Configs: []addonapiv1alpha1.AddOnConfig{
{
ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
Group: utils.AddOnTemplateGVR.Group,
Resource: utils.AddOnTemplateGVR.Resource,
},
ConfigReferent: addonapiv1alpha1.ConfigReferent{
Name: addonTemplateName,
},
},
{
ConfigGroupResource: addonapiv1alpha1.ConfigGroupResource{
Group: utils.AddOnDeploymentConfigGVR.Group,
Resource: utils.AddOnDeploymentConfigGVR.Resource,
},
ConfigReferent: addonapiv1alpha1.ConfigReferent{
Name: addonDeployConfigName,
Namespace: addonDeployConfigNamespace,
},
},
},
},
},
}

_, err = hubAddonClient.AddonV1alpha1().ClusterManagementAddOns().Update(context.Background(), clusterManagementAddon, metav1.UpdateOptions{})
gomega.Expect(err).ToNot(gomega.HaveOccurred())

ginkgo.By("check mca condition")
assertManagedClusterAddOnConditions(addonName, clusterNames[0], metav1.Condition{
Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
Status: metav1.ConditionTrue,
Reason: "ConfigurationsConfigured",
Message: "Configurations configured",
})
assertManagedClusterAddOnConditions(addonName, clusterNames[1], metav1.Condition{
Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
Status: metav1.ConditionFalse,
Reason: "ConfigurationsNotConfigured",
Message: "Configurations updated and not configured yet",
})

ginkgo.By("check only 1 work rendered")
gomega.Eventually(func() error {
work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[0]).List(
context.Background(), metav1.ListOptions{})
if err != nil {
return err
}

if len(work.Items) != 1 {
return fmt.Errorf("Expect 1 work but get %v", work.Items)
}

return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())

gomega.Eventually(func() error {
work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[1]).List(
context.Background(), metav1.ListOptions{})
if err != nil {
return err
}

if len(work.Items) != 0 {
return fmt.Errorf("Expect 0 work but get %v", work.Items)
}
for i := 1; i < numberOfClusters; i++ {
assertManagedClusterAddOnConditions(addonName, clusterNames[i], metav1.Condition{
Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
Status: metav1.ConditionFalse,
Reason: "ConfigurationsNotConfigured",
Message: "Configurations updated and not configured yet",
})
}

return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
ginkgo.By("check work rendered on the first cluster")
checkWorkRendered(clusterNames[0], 1)
for i := 1; i < numberOfClusters; i++ {
checkWorkRendered(clusterNames[i], 0)
}

ginkgo.By("update work status to trigger addon status")
updateManifestWorkStatus(hubWorkClient, clusterNames[0], manifestWorkName, metav1.ConditionTrue)
Expand All @@ -255,21 +241,32 @@ var _ = ginkgo.Describe("Agent deploy", func() {
Reason: "ConfigurationsConfigured",
Message: "Configurations configured",
})
for i := 2; i < numberOfClusters; i++ {
assertManagedClusterAddOnConditions(addonName, clusterNames[i], metav1.Condition{
Type: addonapiv1alpha1.ManagedClusterAddOnConditionConfigured,
Status: metav1.ConditionFalse,
Reason: "ConfigurationsNotConfigured",
Message: "Configurations updated and not configured yet",
})
}

ginkgo.By("check rendered work")
gomega.Eventually(func() error {
work, err := hubWorkClient.WorkV1().ManifestWorks(clusterNames[1]).List(
context.Background(), metav1.ListOptions{})
if err != nil {
return err
}

if len(work.Items) != 1 {
return fmt.Errorf("Expect 1 work but get %v", work.Items)
}

return nil
}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
ginkgo.By("check work rendered on the second cluster")
checkWorkRendered(clusterNames[1], 1)
for i := 2; i < numberOfClusters; i++ {
checkWorkRendered(clusterNames[i], 0)
}
})

})

// checkWorkRendered asserts that exactly expectedCount ManifestWorks
// eventually exist in the given cluster's namespace on the hub.
// It polls via gomega.Eventually using the package-level
// eventuallyTimeout/eventuallyInterval, so transient propagation delays
// do not cause spurious failures.
func checkWorkRendered(clusterName string, expectedCount int) {
	gomega.Eventually(func() error {
		work, err := hubWorkClient.WorkV1().ManifestWorks(clusterName).List(context.Background(), metav1.ListOptions{})
		if err != nil {
			return err
		}
		// Report the observed count rather than dumping the full item list,
		// which keeps failure output readable; error string is lowercase per
		// Go convention.
		if len(work.Items) != expectedCount {
			return fmt.Errorf("expected %d works in cluster %q but got %d", expectedCount, clusterName, len(work.Items))
		}
		return nil
	}, eventuallyTimeout, eventuallyInterval).ShouldNot(gomega.HaveOccurred())
}

0 comments on commit 9181dbb

Please sign in to comment.