Skip to content

Commit

Permalink
Separate pipeline for config map tests.
Browse files Browse the repository at this point in the history
  • Loading branch information
tecarter94 committed Jul 11, 2024
1 parent e2cf1a1 commit fa7b78c
Show file tree
Hide file tree
Showing 2 changed files with 171 additions and 112 deletions.
60 changes: 57 additions & 3 deletions .github/workflows/minikube.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,6 @@ jobs:
- "test-repos"
- "build-systems"
- "commons"
cfgmaptestsets:
- "jakartaee"
needs: [wait-for-images]
runs-on: ubuntu-latest
name: Minikube GAV Matrix
Expand Down Expand Up @@ -79,6 +77,62 @@ jobs:
docker tag quay.io/redhat-appstudio/pull-request-builds:jvmcontroller-${{ github.event.pull_request.head.sha }} quay.io/minikube/hacbs-jvm-controller:dev
export TESTSET=${{ matrix.testsets }}
./deploy/minikube-ci.sh
make minikube-test
- name: Archive Report
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4
if: always()
with:
name: matrix-report-${{ matrix.testsets }}
path: /tmp/jvm-build-service-report
run-configmap-based-tests:
strategy:
fail-fast: false
matrix:
cfgmaptestsets:
- "jakartaee"
needs: [wait-for-images]
runs-on: ubuntu-latest
name: Minikube Config Map Matrix
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@54081f138730dfa15788a46383842cd2f914a1be # v1.3.1
with:
android: true
dotnet: true
haskell: true
docker-images: false
large-packages: false
swap-storage: false
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4
- name: Setup Registry
run: |
export DEV_IP=172.16.1.1
sudo ifconfig lo:0 $DEV_IP
docker run -d -p 5000:5000 --restart=always registry:2
sudo echo '{ "insecure-registries": ["172.16.1.1:5000"] }' | sudo tee /etc/docker/daemon.json
- name: Start minikube
uses: medyagh/setup-minikube@d8c0eb871f6f455542491d86a574477bd3894533 # v0.0.18
with:
cpus: max
memory: max
insecure-registry: '172.16.1.1:5000'
- name: Run Tests
run: |
export SHELL=/bin/bash
export QUAY_USERNAME=minikube
export DEV_IP=172.16.1.1
eval $(minikube -p minikube docker-env)
docker pull quay.io/redhat-appstudio/pull-request-builds:jvmbuildrequestp-${{ github.event.pull_request.head.sha }}
docker pull quay.io/redhat-appstudio/pull-request-builds:jvmcache-${{ github.event.pull_request.head.sha }}
docker pull quay.io/redhat-appstudio/pull-request-builds:jvmcontroller-${{ github.event.pull_request.head.sha }}
docker tag quay.io/redhat-appstudio/pull-request-builds:jvmbuildrequestp-${{ github.event.pull_request.head.sha }} quay.io/minikube/hacbs-jvm-build-request-processor:dev
docker tag quay.io/redhat-appstudio/pull-request-builds:jvmcache-${{ github.event.pull_request.head.sha }} quay.io/minikube/hacbs-jvm-cache:dev
docker tag quay.io/redhat-appstudio/pull-request-builds:jvmcontroller-${{ github.event.pull_request.head.sha }} quay.io/minikube/hacbs-jvm-controller:dev
export CFGMAPTESTSET=${{ matrix.cfgmaptestsets }}
./deploy/minikube-ci.sh
Expand All @@ -87,5 +141,5 @@ jobs:
uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4
if: always()
with:
name: matrix-report-${{ matrix.testsets }}-${{ matrix.cfgmaptestsets }}
name: matrix-report-${{ matrix.cfgmaptestsets }}
path: /tmp/jvm-build-service-report
223 changes: 114 additions & 109 deletions openshift-with-appstudio-test/e2e/basictests.go
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,18 @@ func runPipelineTests(t *testing.T, doSetup func(t *testing.T, namespace string)
}
ta.Logf(fmt.Sprintf("current working dir: %s", path))

testSet := os.Getenv("CFGMAPTESTSET")
// run config map dependency build tests instead if env var is set
if len(testSet) > 0 {
runCfgMapTests(path, testSet, ta)
} else {
runTests(path, os.Getenv("TESTSET"), pipeline, ta)
}
}

func runTests(path string, testSet string, pipeline string, ta *testArgs) {
var err error

runYamlPath := filepath.Join(path, "..", "..", "hack", "examples", pipeline)
ta.run = &tektonpipeline.PipelineRun{}
var ok bool
Expand All @@ -57,7 +69,6 @@ func runPipelineTests(t *testing.T, doSetup func(t *testing.T, namespace string)
debugAndFailTest(ta, fmt.Sprintf("file %s did not produce a pipelinerun: %#v", runYamlPath, obj))
}

testSet := os.Getenv("TESTSET")
//if the GAVS env var is set then we just create pre-defined GAVS
//otherwise we do a full build of a sample project
if len(testSet) > 0 {
Expand Down Expand Up @@ -169,10 +180,6 @@ func runPipelineTests(t *testing.T, doSetup func(t *testing.T, namespace string)
}
})

// run config map dependency build tests if env var is set
cfgMapTestSet := os.Getenv("CFGMAPTESTSET")
runCfgMapTests(path, cfgMapTestSet, ta)

if len(testSet) > 0 {
		//no further checks required here
//we are just checking that the GAVs in question actually build
Expand Down Expand Up @@ -550,133 +557,131 @@ func runPipelineTests(t *testing.T, doSetup func(t *testing.T, namespace string)
}

func runCfgMapTests(path string, testSet string, ta *testArgs) {
if len(testSet) > 0 {
parts := readTestData(path, testSet, "minikube-cfgmap.yaml", ta)
for _, s := range parts {
depBuildBytes, err := os.ReadFile(filepath.Clean(filepath.Join(path, s+"-dependencybuild.yaml")))
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to read dependencybuild for %s configmap test: %s", s, err.Error()))
return
}
db := v1alpha1.DependencyBuild{}
err = yaml.Unmarshal(depBuildBytes, &db)
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to unmarshal dependencybuild for %s configmap test: %s", s, err.Error()))
return
}
buildRecipeBytes, err := os.ReadFile(filepath.Clean(filepath.Join(path, s+"-buildrecipe.yaml")))
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to read buildrecipe for %s configmap test: %s", s, err.Error()))
return
}
buildRecipe := v1alpha1.BuildRecipe{}
err = yaml.Unmarshal(buildRecipeBytes, &buildRecipe)
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to unmarshal buildrecipe for %s configmap test: %s", s, err.Error()))
return
parts := readTestData(path, testSet, "minikube-cfgmap.yaml", ta)
for _, s := range parts {
depBuildBytes, err := os.ReadFile(filepath.Clean(filepath.Join(path, s+"-dependencybuild.yaml")))
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to read dependencybuild for %s configmap test: %s", s, err.Error()))
return
}
db := v1alpha1.DependencyBuild{}
err = yaml.Unmarshal(depBuildBytes, &db)
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to unmarshal dependencybuild for %s configmap test: %s", s, err.Error()))
return
}
buildRecipeBytes, err := os.ReadFile(filepath.Clean(filepath.Join(path, s+"-buildrecipe.yaml")))
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to read buildrecipe for %s configmap test: %s", s, err.Error()))
return
}
buildRecipe := v1alpha1.BuildRecipe{}
err = yaml.Unmarshal(buildRecipeBytes, &buildRecipe)
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to unmarshal buildrecipe for %s configmap test: %s", s, err.Error()))
return
}
db.Namespace = ta.ns
db.Name = util.HashString(db.Spec.ScmInfo.SCMURL + db.Spec.ScmInfo.Tag + db.Spec.ScmInfo.Path)
db.Spec.BuildRecipeConfigMap = db.Name + "configmap"
cfgMap := corev1.ConfigMap{}
cfgMap.Name = db.Spec.BuildRecipeConfigMap
cfgMap.Namespace = ta.ns
cfgMap.Data = map[string]string{"build.yaml": string(buildRecipeBytes)}
_, err = kubeClient.CoreV1().ConfigMaps(ta.ns).Create(context.TODO(), &cfgMap, metav1.CreateOptions{})
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to create configmap %s for dependencybuild repo %s: %s", cfgMap.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
return
}
err = wait.PollUntilContextTimeout(context.TODO(), ta.interval, ta.timeout, true, func(ctx context.Context) (done bool, err error) {
retrievedCfgMap, err := kubeClient.CoreV1().ConfigMaps(ta.ns).Get(context.TODO(), cfgMap.Name, metav1.GetOptions{})
if retrievedCfgMap != nil {
ta.Logf(fmt.Sprintf("successfully retrieved configmap %s for dependencybuild repo %s", cfgMap.Name, db.Spec.ScmInfo.SCMURL))
return true, nil
}
db.Namespace = ta.ns
db.Name = util.HashString(db.Spec.ScmInfo.SCMURL + db.Spec.ScmInfo.Tag + db.Spec.ScmInfo.Path)
db.Spec.BuildRecipeConfigMap = db.Name + "configmap"
cfgMap := corev1.ConfigMap{}
cfgMap.Name = db.Spec.BuildRecipeConfigMap
cfgMap.Namespace = ta.ns
cfgMap.Data = map[string]string{"build.yaml": string(buildRecipeBytes)}
_, err = kubeClient.CoreV1().ConfigMaps(ta.ns).Create(context.TODO(), &cfgMap, metav1.CreateOptions{})
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to create configmap %s for dependencybuild repo %s: %s", cfgMap.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
return
ta.Logf(fmt.Sprintf("error retrieving configmap %s for dependencybuild repo %s: %s", cfgMap.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
}
err = wait.PollUntilContextTimeout(context.TODO(), ta.interval, ta.timeout, true, func(ctx context.Context) (done bool, err error) {
retrievedCfgMap, err := kubeClient.CoreV1().ConfigMaps(ta.ns).Get(context.TODO(), cfgMap.Name, metav1.GetOptions{})
if retrievedCfgMap != nil {
ta.Logf(fmt.Sprintf("successfully retrieved configmap %s for dependencybuild repo %s", cfgMap.Name, db.Spec.ScmInfo.SCMURL))
return true, nil
}
if err != nil {
ta.Logf(fmt.Sprintf("error retrieving configmap %s for dependencybuild repo %s: %s", cfgMap.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
}
return false, nil
})
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("timed out waiting for creation of configmap %s for dependencybuild repo %s", cfgMap.Name, db.Spec.ScmInfo.SCMURL))
return
return false, nil
})
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("timed out waiting for creation of configmap %s for dependencybuild repo %s", cfgMap.Name, db.Spec.ScmInfo.SCMURL))
return
}
_, err = jvmClient.JvmbuildserviceV1alpha1().DependencyBuilds(ta.ns).Create(context.TODO(), &db, metav1.CreateOptions{})
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to create dependencybuild %s for repo %s: %s", db.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
return
}
err = wait.PollUntilContextTimeout(context.TODO(), ta.interval, ta.timeout, true, func(ctx context.Context) (done bool, err error) {
retrievedDb, err := jvmClient.JvmbuildserviceV1alpha1().DependencyBuilds(ta.ns).Get(context.TODO(), db.Name, metav1.GetOptions{})
if retrievedDb != nil {
ta.Logf(fmt.Sprintf("successfully retrieved dependencybuild %s for repo %s", retrievedDb.Name, retrievedDb.Spec.ScmInfo.SCMURL))
return true, nil
}
_, err = jvmClient.JvmbuildserviceV1alpha1().DependencyBuilds(ta.ns).Create(context.TODO(), &db, metav1.CreateOptions{})
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("unable to create dependencybuild %s for repo %s: %s", db.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
return
ta.Logf(fmt.Sprintf("error retrieving dependencybuild %s for repo %s: %s", db.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
}
err = wait.PollUntilContextTimeout(context.TODO(), ta.interval, ta.timeout, true, func(ctx context.Context) (done bool, err error) {
return false, nil
})
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("timed out waiting for creation of dependencybuild %s for repo %s", db.Name, db.Spec.ScmInfo.SCMURL))
return
}

ta.t.Run(fmt.Sprintf("configmap dependencybuild complete for repo %s", db.Spec.ScmInfo.SCMURL), func(t *testing.T) {
defer GenerateStatusReport(ta.ns, jvmClient, kubeClient, tektonClient)
err = wait.PollUntilContextTimeout(context.TODO(), ta.interval, time.Hour, true, func(ctx context.Context) (done bool, err error) {
retrievedDb, err := jvmClient.JvmbuildserviceV1alpha1().DependencyBuilds(ta.ns).Get(context.TODO(), db.Name, metav1.GetOptions{})
if retrievedDb != nil {
ta.Logf(fmt.Sprintf("successfully retrieved dependencybuild %s for repo %s", retrievedDb.Name, retrievedDb.Spec.ScmInfo.SCMURL))
return true, nil
}
if err != nil {
ta.Logf(fmt.Sprintf("error retrieving dependencybuild %s for repo %s: %s", db.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
return false, err
}
return false, nil
dbComplete := true
if retrievedDb.Status.State == v1alpha1.DependencyBuildStateFailed {
ta.Logf(fmt.Sprintf("depedencybuild %s for repo %s FAILED", db.Name, db.Spec.ScmInfo.SCMURL))
return false, fmt.Errorf("depedencybuild %s for repo %s FAILED", retrievedDb.Name, retrievedDb.Spec.ScmInfo.SCMURL)
} else if retrievedDb.Status.State != v1alpha1.DependencyBuildStateComplete {
ta.Logf(fmt.Sprintf("depedencybuild %s for repo %s not complete", retrievedDb.Name, retrievedDb.Spec.ScmInfo.SCMURL))
dbComplete = false
}
return dbComplete, nil
})
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("timed out waiting for creation of dependencybuild %s for repo %s", db.Name, db.Spec.ScmInfo.SCMURL))
return
debugAndFailTest(ta, fmt.Sprintf("timed out waiting for configmap dependencybuild %s for repo %s to complete", db.Name, db.Spec.ScmInfo.SCMURL))
}
})

ta.t.Run(fmt.Sprintf("configmap dependencybuild complete for repo %s", db.Spec.ScmInfo.SCMURL), func(t *testing.T) {
defer GenerateStatusReport(ta.ns, jvmClient, kubeClient, tektonClient)
err = wait.PollUntilContextTimeout(context.TODO(), ta.interval, time.Hour, true, func(ctx context.Context) (done bool, err error) {
retrievedDb, err := jvmClient.JvmbuildserviceV1alpha1().DependencyBuilds(ta.ns).Get(context.TODO(), db.Name, metav1.GetOptions{})
if retrievedDb != nil {
ta.Logf(fmt.Sprintf("successfully retrieved dependencybuild %s for repo %s", retrievedDb.Name, retrievedDb.Spec.ScmInfo.SCMURL))
}
if err != nil {
ta.Logf(fmt.Sprintf("error retrieving dependencybuild %s for repo %s: %s", db.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
return false, err
}
dbComplete := true
if retrievedDb.Status.State == v1alpha1.DependencyBuildStateFailed {
ta.Logf(fmt.Sprintf("depedencybuild %s for repo %s FAILED", db.Name, db.Spec.ScmInfo.SCMURL))
return false, fmt.Errorf("depedencybuild %s for repo %s FAILED", retrievedDb.Name, retrievedDb.Spec.ScmInfo.SCMURL)
} else if retrievedDb.Status.State != v1alpha1.DependencyBuildStateComplete {
ta.Logf(fmt.Sprintf("depedencybuild %s for repo %s not complete", retrievedDb.Name, retrievedDb.Spec.ScmInfo.SCMURL))
dbComplete = false
}
return dbComplete, nil
})
ta.t.Run(fmt.Sprintf("configmap dependencybuild for repo %s contains buildrecipe", db.Spec.ScmInfo.SCMURL), func(t *testing.T) {
defer GenerateStatusReport(ta.ns, jvmClient, kubeClient, tektonClient)
err = wait.PollUntilContextTimeout(context.TODO(), ta.interval, time.Hour, true, func(ctx context.Context) (done bool, err error) {
retrievedDb, err := jvmClient.JvmbuildserviceV1alpha1().DependencyBuilds(ta.ns).Get(context.TODO(), db.Name, metav1.GetOptions{})
if retrievedDb != nil {
ta.Logf(fmt.Sprintf("successfully retrieved dependencybuild %s for repo %s", retrievedDb.Name, retrievedDb.Spec.ScmInfo.SCMURL))
}
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("timed out waiting for configmap dependencybuild %s for repo %s to complete", db.Name, db.Spec.ScmInfo.SCMURL))
ta.Logf(fmt.Sprintf("error retrieving dependencybuild %s for repo %s: %s", db.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
return false, err
}
})

ta.t.Run(fmt.Sprintf("configmap dependencybuild for repo %s contains buildrecipe", db.Spec.ScmInfo.SCMURL), func(t *testing.T) {
defer GenerateStatusReport(ta.ns, jvmClient, kubeClient, tektonClient)
err = wait.PollUntilContextTimeout(context.TODO(), ta.interval, time.Hour, true, func(ctx context.Context) (done bool, err error) {
retrievedDb, err := jvmClient.JvmbuildserviceV1alpha1().DependencyBuilds(ta.ns).Get(context.TODO(), db.Name, metav1.GetOptions{})
if retrievedDb != nil {
ta.Logf(fmt.Sprintf("successfully retrieved dependencybuild %s for repo %s", retrievedDb.Name, retrievedDb.Spec.ScmInfo.SCMURL))
containsRecipe := false
for _, ba := range retrievedDb.Status.BuildAttempts {
ta.Logf(fmt.Sprintf("%+v", ba.Recipe))
samePluginsDisabled, _ := kmp.SafeEqual(ba.Recipe.DisabledPlugins, buildRecipe.DisabledPlugins)
if ba.Recipe.JavaVersion == buildRecipe.JavaVersion && samePluginsDisabled {
containsRecipe = true
}
if err != nil {
ta.Logf(fmt.Sprintf("error retrieving dependencybuild %s for repo %s: %s", db.Name, db.Spec.ScmInfo.SCMURL, err.Error()))
return false, err
}
containsRecipe := false
for _, ba := range retrievedDb.Status.BuildAttempts {
ta.Logf(fmt.Sprintf("%+v", ba.Recipe))
samePluginsDisabled, _ := kmp.SafeEqual(ba.Recipe.DisabledPlugins, buildRecipe.DisabledPlugins)
if ba.Recipe.JavaVersion == buildRecipe.JavaVersion && samePluginsDisabled {
containsRecipe = true
}
}
return containsRecipe, nil
})
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("timed out waiting for configmap dependencybuild %s for repo %s to be retrieved", db.Name, db.Spec.ScmInfo.SCMURL))
}
return containsRecipe, nil
})
}
// TODO improve recipe comparison
if err != nil {
debugAndFailTest(ta, fmt.Sprintf("timed out waiting for configmap dependencybuild %s for repo %s to be retrieved", db.Name, db.Spec.ScmInfo.SCMURL))
}
})
}
// TODO improve recipe comparison
}

func watchEvents(eventClient v1.EventInterface, ta *testArgs) {
Expand Down

0 comments on commit fa7b78c

Please sign in to comment.