From 6405e74023b8fa29b3e6e696405f42e153f3ae06 Mon Sep 17 00:00:00 2001
From: Pavel Sturc
Date: Thu, 12 Dec 2024 11:38:17 +0100
Subject: [PATCH] fix: adapt konflux-demo for konflux-ci

---
 tests/konflux-demo/config/scenarios.go |  19 ++
 tests/konflux-demo/const.go            |   3 +-
 tests/konflux-demo/konflux-demo.go     | 258 +++++++++++++++++--------
 3 files changed, 196 insertions(+), 84 deletions(-)

diff --git a/tests/konflux-demo/config/scenarios.go b/tests/konflux-demo/config/scenarios.go
index 6242331eb..80e040539 100644
--- a/tests/konflux-demo/config/scenarios.go
+++ b/tests/konflux-demo/config/scenarios.go
@@ -29,3 +29,22 @@ var ApplicationSpecs = []ApplicationSpec{
 		},
 	},
 }
+var UpstreamAppSpecs = []ApplicationSpec{
+	{
+		Name:            "Test local instance of konflux-ci",
+		ApplicationName: "konflux-ci-upstream",
+		Skip:            false,
+		ComponentSpec: ComponentSpec{
+			Name:                       "konflux-ci-upstream",
+			GitSourceUrl:               fmt.Sprintf("https://github.com/%s/%s", utils.GetEnv(constants.GITHUB_E2E_ORGANIZATION_ENV, "redhat-appstudio-qe"), "testrepo"),
+			GitSourceRevision:          "ba3f8828d061a539ef774229d2f5c8651d854d7e",
+			GitSourceDefaultBranchName: "main",
+			DockerFilePath:             "Dockerfile",
+			IntegrationTestScenario: IntegrationTestScenarioSpec{
+				GitURL:      fmt.Sprintf("https://github.com/%s/%s", utils.GetEnv(constants.GITHUB_E2E_ORGANIZATION_ENV, "redhat-appstudio-qe"), "testrepo"),
+				GitRevision: "ba3f8828d061a539ef774229d2f5c8651d854d7e",
+				TestPath:    "integration-tests/testrepo-integration.yaml",
+			},
+		},
+	},
+}
diff --git a/tests/konflux-demo/const.go b/tests/konflux-demo/const.go
index 9ff811cf1..681a6dac9 100644
--- a/tests/konflux-demo/const.go
+++ b/tests/konflux-demo/const.go
@@ -24,5 +24,6 @@ const (
 	releasePollingInterval = time.Second * 1
 
 	// test metadata
-	devEnvTestLabel = "konflux"
+	devEnvTestLabel          = "konflux"
+	upstreamKonfluxTestLabel = "upstream-konflux"
 )
diff --git a/tests/konflux-demo/konflux-demo.go b/tests/konflux-demo/konflux-demo.go
index 87914254d..a696cdb49 100644
--- a/tests/konflux-demo/konflux-demo.go
+++ b/tests/konflux-demo/konflux-demo.go
@@ -1,6 +1,8 @@
 package konflux_demo
 
 import (
+	"context"
+	"encoding/base64"
 	"encoding/json"
 	"fmt"
 	"os"
@@ -13,6 +15,7 @@ import (
 	"github.com/redhat-appstudio/jvm-build-service/openshift-with-appstudio-test/e2e"
 	jvmclientSet "github.com/redhat-appstudio/jvm-build-service/pkg/client/clientset/versioned"
 	pipelineclientset "github.com/tektoncd/pipeline/pkg/client/clientset/versioned"
+	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/client-go/kubernetes"
 
 	"github.com/devfile/library/v2/pkg/util"
@@ -36,6 +39,7 @@ import (
 	releaseApi "github.com/konflux-ci/release-service/api/v1alpha1"
 	tektonapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
 
+	kubeapi "github.com/konflux-ci/e2e-tests/pkg/clients/kubernetes"
 	e2eConfig "github.com/konflux-ci/e2e-tests/tests/konflux-demo/config"
 )
 
@@ -62,12 +66,20 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 	//secret := &corev1.Secret{}
 
 	fw := &framework.Framework{}
+	var kubeadminClient *framework.ControllerHub
 	var buildPipelineAnnotation map[string]string
 	var componentNewBaseBranch, gitRevision, componentRepositoryName, componentName string
 
-	for _, appSpec := range e2eConfig.ApplicationSpecs {
+	var appSpecs []e2eConfig.ApplicationSpec
+	if strings.Contains(GinkgoLabelFilter(), upstreamKonfluxTestLabel) {
+		appSpecs = e2eConfig.UpstreamAppSpecs
+	} else {
+		appSpecs = e2eConfig.ApplicationSpecs
+	}
+
+	for _, appSpec := range appSpecs {
 		appSpec := appSpec
 		if appSpec.Skip {
 			continue
 		}
 
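Note: spec selection in the hunk above keys off Ginkgo v2's label filter. GinkgoLabelFilter() returns the expression passed via `ginkgo --label-filter=...`, so a run filtered on upstream-konflux builds the test tree from UpstreamAppSpecs, while any other invocation keeps the downstream ApplicationSpecs. A minimal, self-contained sketch of the pattern (hypothetical suite and names, not code from this patch):

	package demo_test

	import (
		"strings"
		"testing"

		. "github.com/onsi/ginkgo/v2"
		. "github.com/onsi/gomega"
	)

	func TestDemo(t *testing.T) {
		RegisterFailHandler(Fail)
		RunSpecs(t, "label-filter demo")
	}

	var _ = Describe("spec selection", func() {
		// The label filter is already known at tree-construction time, so the
		// suite can decide which fixtures to register before any spec runs.
		specs := []string{"downstream-app"}
		if strings.Contains(GinkgoLabelFilter(), "upstream-konflux") {
			specs = []string{"upstream-app"}
		}
		for _, s := range specs {
			s := s
			// Labeling specs with both labels mirrors the patch: they match a
			// plain "konflux" run as well as an "upstream-konflux" run.
			It("exercises "+s, Label("konflux", "upstream-konflux"), func() {
				Expect(s).NotTo(BeEmpty())
			})
		}
	})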
@@ -78,10 +90,22 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 			if os.Getenv(constants.SKIP_PAC_TESTS_ENV) == "true" {
 				Skip("Skipping this test due to configuration issue with Spray proxy")
 			}
-			// Namespace config
-			fw, err = framework.NewFramework(utils.GetGeneratedNamespace(devEnvTestLabel))
-			Expect(err).NotTo(HaveOccurred())
-			userNamespace = fw.UserNamespace
+			if !strings.Contains(GinkgoLabelFilter(), upstreamKonfluxTestLabel) {
+				// Namespace config
+				fw, err = framework.NewFramework(utils.GetGeneratedNamespace(devEnvTestLabel))
+				Expect(err).NotTo(HaveOccurred())
+				userNamespace = fw.UserNamespace
+				kubeadminClient = fw.AsKubeAdmin
+			} else {
+				var asAdminClient *kubeapi.CustomClient
+				userNamespace = os.Getenv(constants.E2E_APPLICATIONS_NAMESPACE_ENV)
+				asAdminClient, err = kubeapi.NewAdminKubernetesClient()
+				Expect(err).ShouldNot(HaveOccurred())
+				kubeadminClient, err = framework.InitControllerHub(asAdminClient)
+				Expect(err).ShouldNot(HaveOccurred())
+				_, err = kubeadminClient.CommonController.CreateTestNamespace(userNamespace)
+				Expect(err).ShouldNot(HaveOccurred())
+			}
 			managedNamespace = userNamespace + "-managed"
 
 			// Component config
@@ -93,17 +117,22 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 			// https://issues.redhat.com/browse/KFLUXBUGS-1462 - creating SCM secret alongside with PaC
 			// leads to PLRs being duplicated
 			// secretDefinition := build.GetSecretDefForGitHub(namespace)
-			// secret, err = fw.AsKubeAdmin.CommonController.CreateSecret(namespace, secretDefinition)
-			sharedSecret, err := fw.AsKubeAdmin.CommonController.GetSecret(constants.QuayRepositorySecretNamespace, constants.QuayRepositorySecretName)
+			// secret, err = kubeadminClient.CommonController.CreateSecret(namespace, secretDefinition)
+			sharedSecret, err := kubeadminClient.CommonController.GetSecret(constants.QuayRepositorySecretNamespace, constants.QuayRepositorySecretName)
+			if err != nil && k8sErrors.IsNotFound(err) {
+				sharedSecret, err = CreateE2EQuaySecret(kubeadminClient.CommonController.CustomClient)
+				Expect(err).ShouldNot(HaveOccurred())
+			}
 			Expect(err).ShouldNot(HaveOccurred(), fmt.Sprintf("error when getting shared secret - make sure the secret %s in %s userNamespace is created", constants.QuayRepositorySecretName, constants.QuayRepositorySecretNamespace))
 
-			createReleaseConfig(*fw, managedNamespace, appSpec.ComponentSpec.Name, appSpec.ApplicationName, sharedSecret.Data[".dockerconfigjson"])
-
-			// JBS related config
-			_, err = fw.AsKubeAdmin.JvmbuildserviceController.CreateJBSConfig(constants.JBSConfigName, fw.UserNamespace)
-			Expect(err).ShouldNot(HaveOccurred())
-			Expect(fw.AsKubeAdmin.JvmbuildserviceController.WaitForCache(fw.AsKubeAdmin.CommonController, fw.UserNamespace)).Should(Succeed())
+			createReleaseConfig(kubeadminClient, managedNamespace, userNamespace, appSpec.ComponentSpec.Name, appSpec.ApplicationName, sharedSecret.Data[".dockerconfigjson"])
+			// JBS related config for Java components
+			if appSpec.ComponentSpec.Language == "Java" {
+				_, err = kubeadminClient.JvmbuildserviceController.CreateJBSConfig(constants.JBSConfigName, userNamespace)
+				Expect(err).ShouldNot(HaveOccurred())
+				Expect(kubeadminClient.JvmbuildserviceController.WaitForCache(kubeadminClient.CommonController, userNamespace)).Should(Succeed())
+			}
 
 			// get the build pipeline bundle annotation
 			buildPipelineAnnotation = build.GetDockerBuildPipelineBundle()
 
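Note: the BeforeAll above now tolerates a missing shared Quay secret by falling back to CreateE2EQuaySecret (defined at the end of this patch). The shape of that fallback, reduced to a generic client-go get-or-create helper — a sketch under assumed names, not code from this repo:

	// getOrCreateSecret returns the existing secret, or creates `want` when
	// the Get comes back NotFound; any other error is surfaced unchanged.
	func getOrCreateSecret(ctx context.Context, cs kubernetes.Interface, ns string, want *corev1.Secret) (*corev1.Secret, error) {
		got, err := cs.CoreV1().Secrets(ns).Get(ctx, want.Name, metav1.GetOptions{})
		if k8sErrors.IsNotFound(err) {
			return cs.CoreV1().Secrets(ns).Create(ctx, want, metav1.CreateOptions{})
		}
		return got, err
	}

(Assumed imports: corev1 "k8s.io/api/core/v1", k8sErrors "k8s.io/apimachinery/pkg/api/errors", metav1 "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/client-go/kubernetes".)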
@@ -111,16 +140,16 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 
 			// Remove all resources created by the tests
 			AfterAll(func() {
-				if !(strings.EqualFold(os.Getenv("E2E_SKIP_CLEANUP"), "true")) && !CurrentSpecReport().Failed() {
+				if !(strings.EqualFold(os.Getenv("E2E_SKIP_CLEANUP"), "true")) && !CurrentSpecReport().Failed() && !strings.Contains(GinkgoLabelFilter(), upstreamKonfluxTestLabel) {
 					Expect(fw.SandboxController.DeleteUserSignup(fw.UserName)).To(BeTrue())
-					Expect(fw.AsKubeAdmin.CommonController.DeleteNamespace(managedNamespace)).To(Succeed())
+					Expect(kubeadminClient.CommonController.DeleteNamespace(managedNamespace)).To(Succeed())
 					// Delete new branch created by PaC and a testing branch used as a component's base branch
-					err = fw.AsKubeAdmin.CommonController.Github.DeleteRef(componentRepositoryName, pacBranchName)
+					err = kubeadminClient.CommonController.Github.DeleteRef(componentRepositoryName, pacBranchName)
 					if err != nil {
 						Expect(err.Error()).To(ContainSubstring("Reference does not exist"))
 					}
-					err = fw.AsKubeAdmin.CommonController.Github.DeleteRef(componentRepositoryName, componentNewBaseBranch)
+					err = kubeadminClient.CommonController.Github.DeleteRef(componentRepositoryName, componentNewBaseBranch)
 					if err != nil {
 						Expect(err.Error()).To(ContainSubstring("Reference does not exist"))
 					}
 				}
@@ -129,31 +158,31 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 			})
 
 			// Create an application in a specific namespace
-			It("creates an application", Label(devEnvTestLabel), func() {
-				createdApplication, err := fw.AsKubeDeveloper.HasController.CreateApplication(appSpec.ApplicationName, userNamespace)
+			It("creates an application", Label(devEnvTestLabel, upstreamKonfluxTestLabel), func() {
+				createdApplication, err := kubeadminClient.HasController.CreateApplication(appSpec.ApplicationName, userNamespace)
 				Expect(err).NotTo(HaveOccurred())
 				Expect(createdApplication.Spec.DisplayName).To(Equal(appSpec.ApplicationName))
 				Expect(createdApplication.Namespace).To(Equal(userNamespace))
 			})
 
 			// Create an IntegrationTestScenario for the App
-			It("creates an IntegrationTestScenario for the app", Label(devEnvTestLabel), func() {
+			It("creates an IntegrationTestScenario for the app", Label(devEnvTestLabel, upstreamKonfluxTestLabel), func() {
 				its := appSpec.ComponentSpec.IntegrationTestScenario
-				integrationTestScenario, err = fw.AsKubeAdmin.IntegrationController.CreateIntegrationTestScenario("", appSpec.ApplicationName, fw.UserNamespace, its.GitURL, its.GitRevision, its.TestPath, []string{})
+				integrationTestScenario, err = kubeadminClient.IntegrationController.CreateIntegrationTestScenario("", appSpec.ApplicationName, userNamespace, its.GitURL, its.GitRevision, its.TestPath, []string{})
 				Expect(err).ShouldNot(HaveOccurred())
 			})
 
-			It("creates new branch for the build", Label(devEnvTestLabel), func() {
+			It("creates new branch for the build", Label(devEnvTestLabel, upstreamKonfluxTestLabel), func() {
 				// We need to create a new branch that we will target
 				// and that will contain the PaC configuration, so we
 				// can avoid polluting the default (main) branch
 				componentNewBaseBranch = fmt.Sprintf("base-%s", util.GenerateRandomString(6))
 				gitRevision = componentNewBaseBranch
-				Expect(fw.AsKubeAdmin.CommonController.Github.CreateRef(componentRepositoryName, appSpec.ComponentSpec.GitSourceDefaultBranchName, appSpec.ComponentSpec.GitSourceRevision, componentNewBaseBranch)).To(Succeed())
+				Expect(kubeadminClient.CommonController.Github.CreateRef(componentRepositoryName, appSpec.ComponentSpec.GitSourceDefaultBranchName, appSpec.ComponentSpec.GitSourceRevision, componentNewBaseBranch)).To(Succeed())
 			})
 
 			// Component are imported from gitUrl
-			It(fmt.Sprintf("creates component %s (private: %t) from git source %s", appSpec.ComponentSpec.Name, appSpec.ComponentSpec.Private, appSpec.ComponentSpec.GitSourceUrl), Label(devEnvTestLabel), func() {
+			It(fmt.Sprintf("creates component %s (private: %t) from git source %s", appSpec.ComponentSpec.Name, appSpec.ComponentSpec.Private, appSpec.ComponentSpec.GitSourceUrl), Label(devEnvTestLabel, upstreamKonfluxTestLabel), func() {
 				componentObj := appservice.ComponentSpec{
 					ComponentName: componentName,
 					Application:   appSpec.ApplicationName,
@@ -169,15 +198,15 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 					},
 				}
 
-				component, err = fw.AsKubeAdmin.HasController.CreateComponent(componentObj, userNamespace, "", "", appSpec.ApplicationName, false, utils.MergeMaps(constants.ComponentPaCRequestAnnotation, buildPipelineAnnotation))
+				component, err = kubeadminClient.HasController.CreateComponent(componentObj, userNamespace, "", "", appSpec.ApplicationName, false, utils.MergeMaps(constants.ComponentPaCRequestAnnotation, buildPipelineAnnotation))
 				Expect(err).ShouldNot(HaveOccurred())
 			})
 
-			When("Component is created", func() {
+			When("Component is created", Label(upstreamKonfluxTestLabel), func() {
 				It("triggers creation of a PR in the sample repo", func() {
 					var prSHA string
 					Eventually(func() error {
-						prs, err := fw.AsKubeAdmin.CommonController.Github.ListPullRequests(componentRepositoryName)
+						prs, err := kubeadminClient.CommonController.Github.ListPullRequests(componentRepositoryName)
 						Expect(err).ShouldNot(HaveOccurred())
 						for _, pr := range prs {
 							if pr.Head.GetRef() == pacBranchName {
@@ -191,19 +220,19 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 
 					// We don't need the PipelineRun from a PaC 'pull-request' event to finish, so we can delete it
 					Eventually(func() error {
-						pipelineRun, err = fw.AsKubeAdmin.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, fw.UserNamespace, prSHA)
+						pipelineRun, err = kubeadminClient.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, userNamespace, prSHA)
 						if err == nil {
-							Expect(fw.AsKubeAdmin.TektonController.DeletePipelineRun(pipelineRun.Name, pipelineRun.Namespace)).To(Succeed())
+							Expect(kubeadminClient.TektonController.DeletePipelineRun(pipelineRun.Name, pipelineRun.Namespace)).To(Succeed())
 							return nil
 						}
 						return err
-					}, pipelineRunStartedTimeout, constants.PipelineRunPollingInterval).Should(Succeed(), fmt.Sprintf("timed out when waiting for `pull-request` event type PaC PipelineRun to be present in the user namespace %q for component %q with a label pointing to %q", fw.UserNamespace, component.GetName(), appSpec.ApplicationName))
+					}, pipelineRunStartedTimeout, constants.PipelineRunPollingInterval).Should(Succeed(), fmt.Sprintf("timed out when waiting for `pull-request` event type PaC PipelineRun to be present in the user namespace %q for component %q with a label pointing to %q", userNamespace, component.GetName(), appSpec.ApplicationName))
 				})
 
 				It("verifies component build status", func() {
 					var buildStatus *buildcontrollers.BuildStatus
 					Eventually(func() (bool, error) {
-						component, err := fw.AsKubeAdmin.HasController.GetComponent(component.GetName(), fw.UserNamespace)
+						component, err := kubeadminClient.HasController.GetComponent(component.GetName(), userNamespace)
 						if err != nil {
 							return false, err
 						}
@@ -231,50 +260,50 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 
 				It("should eventually lead to triggering a 'push' event type PipelineRun after merging the PaC init branch ", func() {
 					Eventually(func() error {
-						mergeResult, err = fw.AsKubeAdmin.CommonController.Github.MergePullRequest(componentRepositoryName, prNumber)
+						mergeResult, err = kubeadminClient.CommonController.Github.MergePullRequest(componentRepositoryName, prNumber)
 						return err
 					}, mergePRTimeout).Should(BeNil(), fmt.Sprintf("error when merging PaC pull request: %+v\n", err))
 
 					headSHA = mergeResult.GetSHA()
 
 					Eventually(func() error {
-						pipelineRun, err = fw.AsKubeAdmin.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, fw.UserNamespace, headSHA)
+						pipelineRun, err = kubeadminClient.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, userNamespace, headSHA)
 						if err != nil {
-							GinkgoWriter.Printf("PipelineRun has not been created yet for component %s/%s\n", fw.UserNamespace, component.GetName())
+							GinkgoWriter.Printf("PipelineRun has not been created yet for component %s/%s\n", userNamespace, component.GetName())
 							return err
 						}
 						if !pipelineRun.HasStarted() {
 							return fmt.Errorf("pipelinerun %s/%s hasn't started yet", pipelineRun.GetNamespace(), pipelineRun.GetName())
 						}
 						return nil
-					}, pipelineRunStartedTimeout, constants.PipelineRunPollingInterval).Should(Succeed(), fmt.Sprintf("timed out when waiting for a PipelineRun in namespace %q with label component label %q and application label %q and sha label %q to start", fw.UserNamespace, component.GetName(), appSpec.ApplicationName, headSHA))
+					}, pipelineRunStartedTimeout, constants.PipelineRunPollingInterval).Should(Succeed(), fmt.Sprintf("timed out when waiting for a PipelineRun in namespace %q with component label %q, application label %q and sha label %q to start", userNamespace, component.GetName(), appSpec.ApplicationName, headSHA))
 				})
 			})
 
-			When("Build PipelineRun is created", func() {
+			When("Build PipelineRun is created", Label(upstreamKonfluxTestLabel), func() {
 				It("does not contain an annotation with a Snapshot Name", func() {
 					Expect(pipelineRun.Annotations["appstudio.openshift.io/snapshot"]).To(Equal(""))
 				})
 				It("should eventually complete successfully", func() {
-					Expect(fw.AsKubeAdmin.HasController.WaitForComponentPipelineToBeFinished(component, headSHA,
-						fw.AsKubeAdmin.TektonController, &has.RetryOptions{Retries: 5, Always: true}, pipelineRun)).To(Succeed())
+					Expect(kubeadminClient.HasController.WaitForComponentPipelineToBeFinished(component, headSHA,
+						kubeadminClient.TektonController, &has.RetryOptions{Retries: 5, Always: true}, pipelineRun)).To(Succeed())
 
 					// in case the first pipelineRun attempt has failed and was retried, we need to update the git branch head ref
 					headSHA = pipelineRun.Labels["pipelinesascode.tekton.dev/sha"]
 				})
 			})
 
-			When("Build PipelineRun completes successfully", func() {
+			When("Build PipelineRun completes successfully", Label(upstreamKonfluxTestLabel), func() {
 				It("should validate Tekton TaskRun test results successfully", func() {
-					pipelineRun, err = fw.AsKubeAdmin.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, fw.UserNamespace, headSHA)
+					pipelineRun, err = kubeadminClient.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, userNamespace, headSHA)
 					Expect(err).ShouldNot(HaveOccurred())
-					Expect(build.ValidateBuildPipelineTestResults(pipelineRun, fw.AsKubeAdmin.CommonController.KubeRest(), false)).To(Succeed())
+					Expect(build.ValidateBuildPipelineTestResults(pipelineRun, kubeadminClient.CommonController.KubeRest(), false)).To(Succeed())
 				})
 
 				It("should validate that the build pipelineRun is signed", func() {
 					Eventually(func() error {
-						pipelineRun, err = fw.AsKubeAdmin.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, fw.UserNamespace, headSHA)
+						pipelineRun, err = kubeadminClient.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, userNamespace, headSHA)
 						if err != nil {
 							return err
 						}
@@ -288,22 +317,22 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 
 				It("should find the related Snapshot CR", func() {
 					Eventually(func() error {
-						snapshot, err = fw.AsKubeAdmin.IntegrationController.GetSnapshot("", pipelineRun.Name, "", fw.UserNamespace)
+						snapshot, err = kubeadminClient.IntegrationController.GetSnapshot("", pipelineRun.Name, "", userNamespace)
 						return err
-					}, snapshotTimeout, snapshotPollingInterval).Should(Succeed(), "timed out when trying to check if the Snapshot exists for PipelineRun %s/%s", fw.UserNamespace, pipelineRun.GetName())
+					}, snapshotTimeout, snapshotPollingInterval).Should(Succeed(), "timed out when trying to check if the Snapshot exists for PipelineRun %s/%s", userNamespace, pipelineRun.GetName())
 				})
 
 				It("should validate that the build pipelineRun is annotated with the name of the Snapshot", func() {
-					pipelineRun, err = fw.AsKubeAdmin.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, fw.UserNamespace, headSHA)
+					pipelineRun, err = kubeadminClient.HasController.GetComponentPipelineRun(component.GetName(), appSpec.ApplicationName, userNamespace, headSHA)
 					Expect(err).ShouldNot(HaveOccurred())
 					Expect(pipelineRun.Annotations["appstudio.openshift.io/snapshot"]).To(Equal(snapshot.GetName()))
 				})
 
 				It("should find the related Integration Test PipelineRun", func() {
 					Eventually(func() error {
-						testPipelinerun, err = fw.AsKubeAdmin.IntegrationController.GetIntegrationPipelineRun(integrationTestScenario.Name, snapshot.Name, fw.UserNamespace)
+						testPipelinerun, err = kubeadminClient.IntegrationController.GetIntegrationPipelineRun(integrationTestScenario.Name, snapshot.Name, userNamespace)
 						if err != nil {
-							GinkgoWriter.Printf("failed to get Integration test PipelineRun for a snapshot '%s' in '%s' namespace: %+v\n", snapshot.Name, fw.UserNamespace, err)
+							GinkgoWriter.Printf("failed to get Integration test PipelineRun for a snapshot '%s' in '%s' namespace: %+v\n", snapshot.Name, userNamespace, err)
 							return err
 						}
 						if !testPipelinerun.HasStarted() {
@@ -316,33 +345,33 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 				})
 			})
 
-			When("Integration Test PipelineRun is created", func() {
+			When("Integration Test PipelineRun is created", Label(upstreamKonfluxTestLabel), func() {
 				It("should eventually complete successfully", func() {
-					Expect(fw.AsKubeAdmin.IntegrationController.WaitForIntegrationPipelineToBeFinished(integrationTestScenario, snapshot, fw.UserNamespace)).To(Succeed(), fmt.Sprintf("Error when waiting for a integration pipeline for snapshot %s/%s to finish", fw.UserNamespace, snapshot.GetName()))
+					Expect(kubeadminClient.IntegrationController.WaitForIntegrationPipelineToBeFinished(integrationTestScenario, snapshot, userNamespace)).To(Succeed(), fmt.Sprintf("Error when waiting for an integration pipeline for snapshot %s/%s to finish", userNamespace, snapshot.GetName()))
 				})
 			})
 
-			When("Integration Test PipelineRun completes successfully", func() {
successfully", func() { + When("Integration Test PipelineRun completes successfully", Label(upstreamKonfluxTestLabel), func() { It("should lead to Snapshot CR being marked as passed", func() { Eventually(func() bool { - snapshot, err = fw.AsKubeAdmin.IntegrationController.GetSnapshot("", pipelineRun.Name, "", fw.UserNamespace) + snapshot, err = kubeadminClient.IntegrationController.GetSnapshot("", pipelineRun.Name, "", userNamespace) Expect(err).ShouldNot(HaveOccurred()) - return fw.AsKubeAdmin.CommonController.HaveTestsSucceeded(snapshot) + return kubeadminClient.CommonController.HaveTestsSucceeded(snapshot) }, time.Minute*5, defaultPollingInterval).Should(BeTrue(), fmt.Sprintf("tests have not succeeded for snapshot %s/%s", snapshot.GetNamespace(), snapshot.GetName())) }) It("should trigger creation of Release CR", func() { Eventually(func() error { - release, err = fw.AsKubeAdmin.ReleaseController.GetRelease("", snapshot.Name, fw.UserNamespace) + release, err = kubeadminClient.ReleaseController.GetRelease("", snapshot.Name, userNamespace) return err - }, releaseTimeout, releasePollingInterval).Should(Succeed(), fmt.Sprintf("timed out when trying to check if the release exists for snapshot %s/%s", fw.UserNamespace, snapshot.GetName())) + }, releaseTimeout, releasePollingInterval).Should(Succeed(), fmt.Sprintf("timed out when trying to check if the release exists for snapshot %s/%s", userNamespace, snapshot.GetName())) }) }) - When("Release CR is created", func() { + When("Release CR is created", Label(upstreamKonfluxTestLabel), func() { It("triggers creation of Release PipelineRun", func() { Eventually(func() error { - pipelineRun, err = fw.AsKubeAdmin.ReleaseController.GetPipelineRunInNamespace(managedNamespace, release.Name, release.Namespace) + pipelineRun, err = kubeadminClient.ReleaseController.GetPipelineRunInNamespace(managedNamespace, release.Name, release.Namespace) if err != nil { GinkgoWriter.Printf("pipelineRun for component '%s' in namespace '%s' not created yet: %+v\n", component.GetName(), managedNamespace, err) return err @@ -355,10 +384,10 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() { }) }) - When("Release PipelineRun is triggered", func() { + When("Release PipelineRun is triggered", Label(upstreamKonfluxTestLabel), func() { It("should eventually succeed", func() { Eventually(func() error { - pr, err := fw.AsKubeAdmin.ReleaseController.GetPipelineRunInNamespace(managedNamespace, release.Name, release.Namespace) + pr, err := kubeadminClient.ReleaseController.GetPipelineRunInNamespace(managedNamespace, release.Name, release.Namespace) Expect(err).ShouldNot(HaveOccurred()) Expect(tekton.HasPipelineRunFailed(pr)).NotTo(BeTrue(), fmt.Sprintf("did not expect PipelineRun %s/%s to fail", pr.GetNamespace(), pr.GetName())) if !pr.IsDone() { @@ -370,36 +399,36 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() { }) }) - When("Release PipelineRun is completed", func() { + When("Release PipelineRun is completed", Label(upstreamKonfluxTestLabel), func() { It("should lead to Release CR being marked as succeeded", func() { Eventually(func() error { - release, err = fw.AsKubeAdmin.ReleaseController.GetRelease(release.Name, "", fw.UserNamespace) + release, err = kubeadminClient.ReleaseController.GetRelease(release.Name, "", userNamespace) Expect(err).ShouldNot(HaveOccurred()) if !release.IsReleased() { return fmt.Errorf("release CR %s/%s is not marked as finished yet", release.GetNamespace(), release.GetName()) } return nil - }, 
-					}, customResourceUpdateTimeout, defaultPollingInterval).Should(Succeed(), fmt.Sprintf("failed to see release %q in namespace %q get marked as released", release.Name, fw.UserNamespace))
+					}, customResourceUpdateTimeout, defaultPollingInterval).Should(Succeed(), fmt.Sprintf("failed to see release %q in namespace %q get marked as released", release.Name, userNamespace))
 				})
 			})
 
 			When("JVM Build Service is used for rebuilding dependencies", func() {
 				// Marking the step as pending since jvm-build-service will be reworked with STONEBLD-2711
 				It("should eventually rebuild of all artifacts and dependencies successfully", Pending, func() {
-					jvmClient := jvmclientSet.New(fw.AsKubeAdmin.JvmbuildserviceController.JvmbuildserviceClient().JvmbuildserviceV1alpha1().RESTClient())
-					tektonClient := pipelineclientset.New(fw.AsKubeAdmin.TektonController.PipelineClient().TektonV1beta1().RESTClient())
-					kubeClient := kubernetes.New(fw.AsKubeAdmin.CommonController.KubeInterface().CoreV1().RESTClient())
+					jvmClient := jvmclientSet.New(kubeadminClient.JvmbuildserviceController.JvmbuildserviceClient().JvmbuildserviceV1alpha1().RESTClient())
+					tektonClient := pipelineclientset.New(kubeadminClient.TektonController.PipelineClient().TektonV1beta1().RESTClient())
+					kubeClient := kubernetes.New(kubeadminClient.CommonController.KubeInterface().CoreV1().RESTClient())
 					//status report ends up in artifacts/redhat-appstudio-e2e/redhat-appstudio-e2e/artifacts/rp_preproc/attachments/xunit
-					defer e2e.GenerateStatusReport(fw.UserNamespace, jvmClient, kubeClient, tektonClient)
+					defer e2e.GenerateStatusReport(userNamespace, jvmClient, kubeClient, tektonClient)
 					Eventually(func() error {
-						abList, err := fw.AsKubeAdmin.JvmbuildserviceController.ListArtifactBuilds(fw.UserNamespace)
+						abList, err := kubeadminClient.JvmbuildserviceController.ListArtifactBuilds(userNamespace)
 						Expect(err).ShouldNot(HaveOccurred())
 						for _, ab := range abList.Items {
 							if ab.Status.State != v1alpha1.ArtifactBuildStateComplete {
 								return fmt.Errorf("artifactbuild %s not complete", ab.Spec.GAV)
 							}
 						}
-						dbList, err := fw.AsKubeAdmin.JvmbuildserviceController.ListDependencyBuilds(fw.UserNamespace)
+						dbList, err := kubeadminClient.JvmbuildserviceController.ListDependencyBuilds(userNamespace)
 						Expect(err).ShouldNot(HaveOccurred())
 						for _, db := range dbList.Items {
 							if db.Status.State != v1alpha1.DependencyBuildStateComplete {
@@ -407,42 +436,42 @@ var _ = framework.KonfluxDemoSuiteDescribe(Label(devEnvTestLabel), func() {
 							}
 						}
 						return nil
-					}, jvmRebuildTimeout, jvmRebuildPollingInterval).Should(Succeed(), fmt.Sprintf("timed out when waiting for all artifactbuilds and dependencybuilds to complete in namespace %q", fw.UserNamespace))
+					}, jvmRebuildTimeout, jvmRebuildPollingInterval).Should(Succeed(), fmt.Sprintf("timed out when waiting for all artifactbuilds and dependencybuilds to complete in namespace %q", userNamespace))
 				})
 			})
 		})
 	}
 })
 
-func createReleaseConfig(fw framework.Framework, managedNamespace, componentName, appName string, secretData []byte) {
+func createReleaseConfig(kubeadminClient *framework.ControllerHub, managedNamespace, userNamespace, componentName, appName string, secretData []byte) {
 	var err error
-	_, err = fw.AsKubeAdmin.CommonController.CreateTestNamespace(managedNamespace)
+	_, err = kubeadminClient.CommonController.CreateTestNamespace(managedNamespace)
 	Expect(err).ShouldNot(HaveOccurred())
 
 	secret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "release-pull-secret", Namespace: managedNamespace},
 		Data: map[string][]byte{".dockerconfigjson": secretData},
 		Type: corev1.SecretTypeDockerConfigJson,
 	}
-	_, err = fw.AsKubeAdmin.CommonController.CreateSecret(managedNamespace, secret)
+	_, err = kubeadminClient.CommonController.CreateSecret(managedNamespace, secret)
 	Expect(err).ShouldNot(HaveOccurred())
 
-	managedServiceAccount, err := fw.AsKubeAdmin.CommonController.CreateServiceAccount("release-service-account", managedNamespace, []corev1.ObjectReference{{Name: secret.Name}}, nil)
+	managedServiceAccount, err := kubeadminClient.CommonController.CreateServiceAccount("release-service-account", managedNamespace, []corev1.ObjectReference{{Name: secret.Name}}, nil)
 	Expect(err).NotTo(HaveOccurred())
 
-	_, err = fw.AsKubeAdmin.ReleaseController.CreateReleasePipelineRoleBindingForServiceAccount(fw.UserNamespace, managedServiceAccount)
+	_, err = kubeadminClient.ReleaseController.CreateReleasePipelineRoleBindingForServiceAccount(userNamespace, managedServiceAccount)
 	Expect(err).NotTo(HaveOccurred())
-	_, err = fw.AsKubeAdmin.ReleaseController.CreateReleasePipelineRoleBindingForServiceAccount(managedNamespace, managedServiceAccount)
+	_, err = kubeadminClient.ReleaseController.CreateReleasePipelineRoleBindingForServiceAccount(managedNamespace, managedServiceAccount)
 	Expect(err).NotTo(HaveOccurred())
 
-	publicKey, err := fw.AsKubeAdmin.TektonController.GetTektonChainsPublicKey()
+	publicKey, err := kubeadminClient.TektonController.GetTektonChainsPublicKey()
 	Expect(err).ToNot(HaveOccurred())
 
-	Expect(fw.AsKubeAdmin.TektonController.CreateOrUpdateSigningSecret(publicKey, "cosign-public-key", managedNamespace)).To(Succeed())
+	Expect(kubeadminClient.TektonController.CreateOrUpdateSigningSecret(publicKey, "cosign-public-key", managedNamespace)).To(Succeed())
 
-	_, err = fw.AsKubeAdmin.ReleaseController.CreateReleasePlan("source-releaseplan", fw.UserNamespace, appName, managedNamespace, "", nil, nil, nil)
+	_, err = kubeadminClient.ReleaseController.CreateReleasePlan("source-releaseplan", userNamespace, appName, managedNamespace, "", nil, nil, nil)
 	Expect(err).NotTo(HaveOccurred())
 
-	defaultEcPolicy, err := fw.AsKubeAdmin.TektonController.GetEnterpriseContractPolicy("default", "enterprise-contract-service")
+	defaultEcPolicy, err := kubeadminClient.TektonController.GetEnterpriseContractPolicy("default", "enterprise-contract-service")
 	Expect(err).NotTo(HaveOccurred())
 	ecPolicyName := componentName + "-policy"
 	defaultEcPolicySpec := ecp.EnterpriseContractPolicySpec{
@@ -454,10 +483,10 @@ func createReleaseConfig(fw framework.Framework, managedNamespace, componentName
 			Exclude: []string{"cve"},
 		},
 	}
-	_, err = fw.AsKubeAdmin.TektonController.CreateEnterpriseContractPolicy(ecPolicyName, managedNamespace, defaultEcPolicySpec)
+	_, err = kubeadminClient.TektonController.CreateEnterpriseContractPolicy(ecPolicyName, managedNamespace, defaultEcPolicySpec)
 	Expect(err).NotTo(HaveOccurred())
 
-	_, err = fw.AsKubeAdmin.ReleaseController.CreateReleasePlanAdmission("demo", managedNamespace, "", fw.UserNamespace, ecPolicyName, "release-service-account", []string{appName}, true, &tektonutils.PipelineRef{
+	_, err = kubeadminClient.ReleaseController.CreateReleasePlanAdmission("demo", managedNamespace, "", userNamespace, ecPolicyName, "release-service-account", []string{appName}, true, &tektonutils.PipelineRef{
 		Resolver: "git",
 		Params: []tektonutils.Param{
 			{Name: "url", Value: releasecommon.RelSvcCatalogURL},
@@ -467,16 +496,79 @@ func createReleaseConfig(fw framework.Framework, managedNamespace, componentName
 	}, nil)
 	Expect(err).NotTo(HaveOccurred())
 
-	_, err = fw.AsKubeAdmin.TektonController.CreatePVCInAccessMode("release-pvc", managedNamespace, corev1.ReadWriteOnce)
+	_, err = kubeadminClient.TektonController.CreatePVCInAccessMode("release-pvc", managedNamespace, corev1.ReadWriteOnce)
 	Expect(err).NotTo(HaveOccurred())
 
-	_, err = fw.AsKubeAdmin.CommonController.CreateRole("role-release-service-account", managedNamespace, map[string][]string{
+	_, err = kubeadminClient.CommonController.CreateRole("role-release-service-account", managedNamespace, map[string][]string{
 		"apiGroupsList": {""},
 		"roleResources": {"secrets"},
 		"roleVerbs":     {"get", "list", "watch"},
 	})
 	Expect(err).NotTo(HaveOccurred())
 
-	_, err = fw.AsKubeAdmin.CommonController.CreateRoleBinding("role-release-service-account-binding", managedNamespace, "ServiceAccount", "release-service-account", managedNamespace, "Role", "role-release-service-account", "rbac.authorization.k8s.io")
+	_, err = kubeadminClient.CommonController.CreateRoleBinding("role-release-service-account-binding", managedNamespace, "ServiceAccount", "release-service-account", managedNamespace, "Role", "role-release-service-account", "rbac.authorization.k8s.io")
 	Expect(err).NotTo(HaveOccurred())
 }
+
+func CreateE2EQuaySecret(k *kubeapi.CustomClient) (*corev1.Secret, error) {
+	var secret *corev1.Secret
+
+	quayToken := os.Getenv("QUAY_TOKEN")
+	if quayToken == "" {
+		return nil, fmt.Errorf("failed to obtain quay token from 'QUAY_TOKEN' env; make sure the env exists")
+	}
+
+	decodedToken, err := base64.StdEncoding.DecodeString(quayToken)
+	if err != nil {
+		return nil, fmt.Errorf("failed to decode quay token; make sure the QUAY_TOKEN env contains a base64-encoded token")
+	}
+
+	namespace := constants.QuayRepositorySecretNamespace
+	_, err = k.KubeInterface().CoreV1().Namespaces().Get(context.Background(), namespace, metav1.GetOptions{})
+	if err != nil {
+		if k8sErrors.IsNotFound(err) {
+			_, err := k.KubeInterface().CoreV1().Namespaces().Create(context.Background(), &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: namespace,
+				},
+			}, metav1.CreateOptions{})
+			if err != nil {
+				return nil, fmt.Errorf("error when creating namespace %s: %v", namespace, err)
+			}
+		} else {
+			return nil, fmt.Errorf("error when getting namespace %s: %v", namespace, err)
+		}
+	}
+
+	secretName := constants.QuayRepositorySecretName
+	secret, err = k.KubeInterface().CoreV1().Secrets(namespace).Get(context.Background(), secretName, metav1.GetOptions{})
+
+	if err != nil && !k8sErrors.IsNotFound(err) {
+		return nil, fmt.Errorf("error when getting secret %s: %v", secretName, err)
+	}
+	if k8sErrors.IsNotFound(err) {
+		secret, err = k.KubeInterface().CoreV1().Secrets(namespace).Create(context.Background(), &corev1.Secret{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      secretName,
+				Namespace: namespace,
+			},
+			Type: corev1.SecretTypeDockerConfigJson,
+			Data: map[string][]byte{
+				corev1.DockerConfigJsonKey: decodedToken,
+			},
+		}, metav1.CreateOptions{})
+		if err != nil {
+			return nil, fmt.Errorf("error when creating secret %s: %v", secretName, err)
+		}
+	} else {
+		secret.Data = map[string][]byte{
+			corev1.DockerConfigJsonKey: decodedToken,
+		}
+		secret, err = k.KubeInterface().CoreV1().Secrets(namespace).Update(context.Background(), secret, metav1.UpdateOptions{})
+		if err != nil {
+			return nil, fmt.Errorf("error when updating secret %s: %v", secretName, err)
+		}
+	}
+
+	return secret, nil
+}
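Note: CreateE2EQuaySecret expects QUAY_TOKEN to hold a base64-encoded .dockerconfigjson payload, which it stores under corev1.DockerConfigJsonKey. A throwaway sketch for producing that value from a local Docker config — the path and workflow are assumptions, adjust as needed:

	package main

	import (
		"encoding/base64"
		"fmt"
		"os"
		"path/filepath"
	)

	func main() {
		home, err := os.UserHomeDir()
		if err != nil {
			panic(err)
		}
		// Read an existing dockerconfigjson that carries quay.io credentials...
		cfg, err := os.ReadFile(filepath.Join(home, ".docker", "config.json"))
		if err != nil {
			panic(err)
		}
		// ...and print the base64 form expected in QUAY_TOKEN.
		fmt.Println(base64.StdEncoding.EncodeToString(cfg))
	}

Export the printed value as QUAY_TOKEN before running the upstream flow, e.g. with a label-filtered invocation such as `ginkgo --label-filter=upstream-konflux ./tests/konflux-demo` (exact CLI wiring depends on this repo's test runner).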