diff --git a/.tekton/tasks/e2e-test.yaml b/.tekton/tasks/e2e-test.yaml
index a5bc52c189..6300d145d2 100644
--- a/.tekton/tasks/e2e-test.yaml
+++ b/.tekton/tasks/e2e-test.yaml
@@ -26,7 +26,7 @@ spec:
type: string
steps:
- name: e2e-test
- image: quay.io/redhat-user-workloads/rhtap-qe-shared-tenant/konflux-e2e/konflux-e2e-tests:3fdbf3e389e0cde01b878f34f6d5facaac3b377c
+ image: quay.io/redhat-user-workloads/rhtap-qe-shared-tenant/konflux-e2e/konflux-e2e-tests:91ce627177c27570aebf1bb59b9fd7614f229152
# a la infra-deployment updates, when PRs merge in e2e-tests, PRs will be opened
# against build-definitions to update this tag
args: [
diff --git a/hack/generate-buildah-remote.sh b/hack/generate-buildah-remote.sh
index ffbd943422..da061ed821 100755
--- a/hack/generate-buildah-remote.sh
+++ b/hack/generate-buildah-remote.sh
@@ -1,12 +1,14 @@
#!/bin/bash
+set -euo pipefail
SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
-cd $SCRIPTDIR/../task-generator
+cd "${SCRIPTDIR}/../task-generator"
go build -o /tmp/remote-generator ./remote/main.go
-
-/tmp/remote-generator --buildah-task=$SCRIPTDIR/../task/buildah/0.1/buildah.yaml \
- --remote-task=$SCRIPTDIR/../task/buildah-remote/0.1/buildah-remote.yaml
-/tmp/remote-generator --buildah-task=$SCRIPTDIR/../task/buildah-oci-ta/0.1/buildah-oci-ta.yaml \
- --remote-task=$SCRIPTDIR/../task/buildah-remote-oci-ta/0.1/buildah-remote-oci-ta.yaml
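+# Regenerate the buildah-remote and buildah-remote-oci-ta variants for each buildah task version.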
+for version in 0.1 0.2; do
+ /tmp/remote-generator --buildah-task="${SCRIPTDIR}/../task/buildah/${version}/buildah.yaml" \
+ --remote-task="${SCRIPTDIR}/../task/buildah-remote/${version}/buildah-remote.yaml"
+ /tmp/remote-generator --buildah-task="${SCRIPTDIR}/../task/buildah-oci-ta/${version}/buildah-oci-ta.yaml" \
+ --remote-task="${SCRIPTDIR}/../task/buildah-remote-oci-ta/${version}/buildah-remote-oci-ta.yaml"
+done
diff --git a/pipelines/docker-build/patch.yaml b/pipelines/docker-build/patch.yaml
index b3ee8d64e0..560b76342f 100644
--- a/pipelines/docker-build/patch.yaml
+++ b/pipelines/docker-build/patch.yaml
@@ -12,7 +12,7 @@
path: /spec/tasks/3/taskRef
value:
name: buildah
- version: "0.1"
+ version: "0.2"
- op: add
path: /spec/params/-
value:
diff --git a/pipelines/template-build/template-build.yaml b/pipelines/template-build/template-build.yaml
index 4876c24d4a..c852f64ef1 100644
--- a/pipelines/template-build/template-build.yaml
+++ b/pipelines/template-build/template-build.yaml
@@ -1,3 +1,6 @@
+# This build pipeline template is intended to be processed by the scripts
+# under the hack/ directory rather than by kustomize directly.
+
apiVersion: tekton.dev/v1
kind: Pipeline
metadata:
@@ -64,6 +67,7 @@ spec:
value: "$(params.skip-checks)"
taskRef:
name: init
+        # A pointer for referencing the correct version of the task in the built pipeline bundles.
version: "0.2"
- name: clone-repository
when:
@@ -133,8 +137,6 @@ spec:
params:
- name: BINARY_IMAGE
value: "$(params.output-image)"
- - name: BASE_IMAGES
- value: "$(tasks.build-container.results.BASE_IMAGES_DIGESTS)"
workspaces:
- name: workspace
workspace: workspace
@@ -147,8 +149,6 @@ spec:
name: deprecated-image-check
version: "0.4"
params:
- - name: BASE_IMAGES_DIGESTS
- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
- name: IMAGE_URL
value: $(tasks.build-container.results.IMAGE_URL)
- name: IMAGE_DIGEST
diff --git a/task-generator/remote/main.go b/task-generator/remote/main.go
index 7c8932ba08..d03c0e4359 100644
--- a/task-generator/remote/main.go
+++ b/task-generator/remote/main.go
@@ -16,6 +16,10 @@ package main
import (
"bytes"
"flag"
+ "os"
+ "path/filepath"
+ "strings"
+
tektonapi "github.com/tektoncd/pipeline/pkg/apis/pipeline/v1"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
@@ -23,10 +27,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/cli-runtime/pkg/printers"
klog "k8s.io/klog/v2"
- "os"
- "path/filepath"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
- "strings"
)
func main() {
@@ -56,7 +57,11 @@ func main() {
y := printers.YAMLPrinter{}
b := bytes.Buffer{}
_ = y.PrintObj(&task, &b)
- err := os.WriteFile(buildahRemoteTask, b.Bytes(), 0660) //#nosec
+ err := os.MkdirAll(filepath.Dir(buildahRemoteTask), 0755) //#nosec G301 -- all the dirs in the repo are 755
+ if err != nil {
+ panic(err)
+ }
+ err = os.WriteFile(buildahRemoteTask, b.Bytes(), 0660) //#nosec
if err != nil {
panic(err)
}
@@ -127,6 +132,10 @@ fi
`
env := "$PODMAN_PORT_FORWARD \\\n"
+
+ // disable podman subscription-manager integration
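+	// (a tmpfs over /run/secrets hides the host secrets that podman would otherwise auto-mount)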
+ env += " --tmpfs /run/secrets \\\n"
+
// Before the build we sync the contents of the workspace to the remote host
for _, workspace := range task.Spec.Workspaces {
ret += "\nrsync -ra $(workspaces." + workspace.Name + ".path)/ \"$SSH_HOST:$BUILD_DIR/workspaces/" + workspace.Name + "/\""
diff --git a/task/buildah-10gb/0.2/MIGRATION.md b/task/buildah-10gb/0.2/MIGRATION.md
new file mode 100644
index 0000000000..f0a1a0ad06
--- /dev/null
+++ b/task/buildah-10gb/0.2/MIGRATION.md
@@ -0,0 +1,46 @@
+# Migration from 0.1 to 0.2
+
+Version 0.2:
+
+* Removes the `BASE_IMAGES_DIGESTS` result. Please remove all references to this
+  result from your pipeline.
+  * Base images and their digests can be found in the SBOM for the output image
+    (see the sketch after this list).
+* No longer writes the `base_images_from_dockerfile` file into the `source` workspace.
+
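+For reference, here is a minimal sketch of how the attached SBOM could be fetched and
+inspected locally. It assumes `cosign` and `jq` are available and uses a placeholder
+image reference; where exactly the base-image entries appear depends on the SBOM format.
+
+```bash
+# Placeholder reference: substitute the IMAGE_URL and IMAGE_DIGEST results of your build.
+IMAGE="quay.io/your-org/your-image@sha256:<digest>"
+
+# The buildah task attaches the CycloneDX SBOM to the image with cosign,
+# so it can be downloaded the same way and then explored with jq.
+cosign download sbom "$IMAGE" > sbom-cyclonedx.json
+jq 'keys' sbom-cyclonedx.json  # list the top-level sections of the SBOM
+```
+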
+## Konflux-specific
+
+In a typical Konflux pipeline, the two tasks that used to depend on the `BASE_IMAGES_DIGESTS`
+result are `build-source-image` and `deprecated-base-image-check`.
+
+1. Make sure your version of `deprecated-base-image-check` is at least `0.4`.
+2. Make sure your version of `build-source-image` supports reading base images from
+   the SBOM. Version `0.1` has supported it since 2024-07-15. In the logs of your build
+   pipeline, you should see that the build-source-image task now has a GET-BASE-IMAGES
+   step. Once you stop passing the `BASE_IMAGES` param (which referenced the
+   `BASE_IMAGES_DIGESTS` result), this step will emit logs about handling the SBOM.
+3. Remove the parameters that reference the `BASE_IMAGES_DIGESTS` result:
+
+```diff
+@@ -255,10 +255,8 @@ spec:
+ - name: build-source-image
+ params:
+ - name: BINARY_IMAGE
+ value: $(params.output-image)
+- - name: BASE_IMAGES
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ runAfter:
+ - build-container
+ taskRef:
+ params:
+@@ -282,10 +280,8 @@ spec:
+ - name: workspace
+ workspace: workspace
+ - name: deprecated-base-image-check
+ params:
+- - name: BASE_IMAGES_DIGESTS
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ - name: IMAGE_URL
+ value: $(tasks.build-container.results.IMAGE_URL)
+ - name: IMAGE_DIGEST
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
+```
diff --git a/task/buildah-10gb/0.2/kustomization.yaml b/task/buildah-10gb/0.2/kustomization.yaml
new file mode 100644
index 0000000000..2c6158898f
--- /dev/null
+++ b/task/buildah-10gb/0.2/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../buildah/0.2
+
+patches:
+- path: patch.yaml
+ target:
+ kind: Task
diff --git a/task/buildah-10gb/0.2/patch.yaml b/task/buildah-10gb/0.2/patch.yaml
new file mode 100644
index 0000000000..3cbc78cfde
--- /dev/null
+++ b/task/buildah-10gb/0.2/patch.yaml
@@ -0,0 +1,9 @@
+- op: replace
+ path: /metadata/name
+ value: buildah-10gb
+- op: replace
+ path: /spec/steps/0/computeResources/limits/memory
+ value: 10Gi
+- op: replace
+ path: /spec/steps/0/computeResources/requests/memory
+ value: 8Gi
diff --git a/task/buildah-20gb/0.2/MIGRATION.md b/task/buildah-20gb/0.2/MIGRATION.md
new file mode 100644
index 0000000000..f0a1a0ad06
--- /dev/null
+++ b/task/buildah-20gb/0.2/MIGRATION.md
@@ -0,0 +1,46 @@
+# Migration from 0.1 to 0.2
+
+Version 0.2:
+
+* Removes the `BASE_IMAGES_DIGESTS` result. Please remove all references to this
+  result from your pipeline.
+  * Base images and their digests can be found in the SBOM for the output image
+    (see the sketch after this list).
+* No longer writes the `base_images_from_dockerfile` file into the `source` workspace.
+
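+For reference, here is a minimal sketch of how the attached SBOM could be fetched and
+inspected locally. It assumes `cosign` and `jq` are available and uses a placeholder
+image reference; where exactly the base-image entries appear depends on the SBOM format.
+
+```bash
+# Placeholder reference: substitute the IMAGE_URL and IMAGE_DIGEST results of your build.
+IMAGE="quay.io/your-org/your-image@sha256:<digest>"
+
+# The buildah task attaches the CycloneDX SBOM to the image with cosign,
+# so it can be downloaded the same way and then explored with jq.
+cosign download sbom "$IMAGE" > sbom-cyclonedx.json
+jq 'keys' sbom-cyclonedx.json  # list the top-level sections of the SBOM
+```
+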
+## Konflux-specific
+
+In a typical Konflux pipeline, the two tasks that used to depend on the `BASE_IMAGES_DIGESTS`
+result are `build-source-image` and `deprecated-base-image-check`.
+
+1. Make sure your version of `deprecated-base-image-check` is at least `0.4`.
+2. Make sure your version of `build-source-image` supports reading base images from
+   the SBOM. Version `0.1` has supported it since 2024-07-15. In the logs of your build
+   pipeline, you should see that the build-source-image task now has a GET-BASE-IMAGES
+   step. Once you stop passing the `BASE_IMAGES` param (which referenced the
+   `BASE_IMAGES_DIGESTS` result), this step will emit logs about handling the SBOM.
+3. Remove the parameters that reference the `BASE_IMAGES_DIGESTS` result:
+
+```diff
+@@ -255,10 +255,8 @@ spec:
+ - name: build-source-image
+ params:
+ - name: BINARY_IMAGE
+ value: $(params.output-image)
+- - name: BASE_IMAGES
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ runAfter:
+ - build-container
+ taskRef:
+ params:
+@@ -282,10 +280,8 @@ spec:
+ - name: workspace
+ workspace: workspace
+ - name: deprecated-base-image-check
+ params:
+- - name: BASE_IMAGES_DIGESTS
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ - name: IMAGE_URL
+ value: $(tasks.build-container.results.IMAGE_URL)
+ - name: IMAGE_DIGEST
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
+```
diff --git a/task/buildah-20gb/0.2/kustomization.yaml b/task/buildah-20gb/0.2/kustomization.yaml
new file mode 100644
index 0000000000..2c6158898f
--- /dev/null
+++ b/task/buildah-20gb/0.2/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../buildah/0.2
+
+patches:
+- path: patch.yaml
+ target:
+ kind: Task
diff --git a/task/buildah-20gb/0.2/patch.yaml b/task/buildah-20gb/0.2/patch.yaml
new file mode 100644
index 0000000000..58b85da8d8
--- /dev/null
+++ b/task/buildah-20gb/0.2/patch.yaml
@@ -0,0 +1,9 @@
+- op: replace
+ path: /metadata/name
+ value: buildah-20gb
+- op: replace
+ path: /spec/steps/0/computeResources/limits/memory
+ value: 20Gi
+- op: replace
+ path: /spec/steps/0/computeResources/requests/memory
+ value: 16Gi
diff --git a/task/buildah-24gb/0.2/MIGRATION.md b/task/buildah-24gb/0.2/MIGRATION.md
new file mode 100644
index 0000000000..f0a1a0ad06
--- /dev/null
+++ b/task/buildah-24gb/0.2/MIGRATION.md
@@ -0,0 +1,46 @@
+# Migration from 0.1 to 0.2
+
+Version 0.2:
+
+* Removes the `BASE_IMAGES_DIGESTS` result. Please remove all references to this
+  result from your pipeline.
+  * Base images and their digests can be found in the SBOM for the output image
+    (see the sketch after this list).
+* No longer writes the `base_images_from_dockerfile` file into the `source` workspace.
+
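+For reference, here is a minimal sketch of how the attached SBOM could be fetched and
+inspected locally. It assumes `cosign` and `jq` are available and uses a placeholder
+image reference; where exactly the base-image entries appear depends on the SBOM format.
+
+```bash
+# Placeholder reference: substitute the IMAGE_URL and IMAGE_DIGEST results of your build.
+IMAGE="quay.io/your-org/your-image@sha256:<digest>"
+
+# The buildah task attaches the CycloneDX SBOM to the image with cosign,
+# so it can be downloaded the same way and then explored with jq.
+cosign download sbom "$IMAGE" > sbom-cyclonedx.json
+jq 'keys' sbom-cyclonedx.json  # list the top-level sections of the SBOM
+```
+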
+## Konflux-specific
+
+In a typical Konflux pipeline, the two tasks that used to depend on the `BASE_IMAGES_DIGESTS`
+result are `build-source-image` and `deprecated-base-image-check`.
+
+1. Make sure your version of `deprecated-base-image-check` is at least `0.4`.
+2. Make sure your version of `build-source-image` supports reading base images from
+   the SBOM. Version `0.1` has supported it since 2024-07-15. In the logs of your build
+   pipeline, you should see that the build-source-image task now has a GET-BASE-IMAGES
+   step. Once you stop passing the `BASE_IMAGES` param (which referenced the
+   `BASE_IMAGES_DIGESTS` result), this step will emit logs about handling the SBOM.
+3. Remove the parameters that reference the `BASE_IMAGES_DIGESTS` result:
+
+```diff
+@@ -255,10 +255,8 @@ spec:
+ - name: build-source-image
+ params:
+ - name: BINARY_IMAGE
+ value: $(params.output-image)
+- - name: BASE_IMAGES
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ runAfter:
+ - build-container
+ taskRef:
+ params:
+@@ -282,10 +280,8 @@ spec:
+ - name: workspace
+ workspace: workspace
+ - name: deprecated-base-image-check
+ params:
+- - name: BASE_IMAGES_DIGESTS
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ - name: IMAGE_URL
+ value: $(tasks.build-container.results.IMAGE_URL)
+ - name: IMAGE_DIGEST
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
+```
diff --git a/task/buildah-24gb/0.2/kustomization.yaml b/task/buildah-24gb/0.2/kustomization.yaml
new file mode 100644
index 0000000000..2c6158898f
--- /dev/null
+++ b/task/buildah-24gb/0.2/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../buildah/0.2
+
+patches:
+- path: patch.yaml
+ target:
+ kind: Task
diff --git a/task/buildah-24gb/0.2/patch.yaml b/task/buildah-24gb/0.2/patch.yaml
new file mode 100644
index 0000000000..8130644828
--- /dev/null
+++ b/task/buildah-24gb/0.2/patch.yaml
@@ -0,0 +1,15 @@
+- op: replace
+ path: /metadata/name
+ value: buildah-24gb
+- op: replace
+ path: /spec/steps/0/computeResources/limits/memory
+ value: 24Gi
+- op: replace
+ path: /spec/steps/0/computeResources/requests/memory
+ value: 20Gi
+- op: replace
+ path: /spec/steps/0/computeResources/limits/cpu
+ value: "20"
+- op: replace
+ path: /spec/steps/0/computeResources/requests/cpu
+ value: "10"
diff --git a/task/buildah-6gb/0.2/MIGRATION.md b/task/buildah-6gb/0.2/MIGRATION.md
new file mode 100644
index 0000000000..f0a1a0ad06
--- /dev/null
+++ b/task/buildah-6gb/0.2/MIGRATION.md
@@ -0,0 +1,46 @@
+# Migration from 0.1 to 0.2
+
+Version 0.2:
+
+* Removes the `BASE_IMAGES_DIGESTS` result. Please remove all references to this
+  result from your pipeline.
+  * Base images and their digests can be found in the SBOM for the output image
+    (see the sketch after this list).
+* No longer writes the `base_images_from_dockerfile` file into the `source` workspace.
+
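+For reference, here is a minimal sketch of how the attached SBOM could be fetched and
+inspected locally. It assumes `cosign` and `jq` are available and uses a placeholder
+image reference; where exactly the base-image entries appear depends on the SBOM format.
+
+```bash
+# Placeholder reference: substitute the IMAGE_URL and IMAGE_DIGEST results of your build.
+IMAGE="quay.io/your-org/your-image@sha256:<digest>"
+
+# The buildah task attaches the CycloneDX SBOM to the image with cosign,
+# so it can be downloaded the same way and then explored with jq.
+cosign download sbom "$IMAGE" > sbom-cyclonedx.json
+jq 'keys' sbom-cyclonedx.json  # list the top-level sections of the SBOM
+```
+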
+## Konflux-specific
+
+In a typical Konflux pipeline, the two tasks that used to depend on the `BASE_IMAGES_DIGESTS`
+result are `build-source-image` and `deprecated-base-image-check`.
+
+1. Make sure your version of `deprecated-base-image-check` is at least `0.4`.
+2. Make sure your version of `build-source-image` supports reading base images from
+   the SBOM. Version `0.1` has supported it since 2024-07-15. In the logs of your build
+   pipeline, you should see that the build-source-image task now has a GET-BASE-IMAGES
+   step. Once you stop passing the `BASE_IMAGES` param (which referenced the
+   `BASE_IMAGES_DIGESTS` result), this step will emit logs about handling the SBOM.
+3. Remove the parameters that reference the `BASE_IMAGES_DIGESTS` result:
+
+```diff
+@@ -255,10 +255,8 @@ spec:
+ - name: build-source-image
+ params:
+ - name: BINARY_IMAGE
+ value: $(params.output-image)
+- - name: BASE_IMAGES
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ runAfter:
+ - build-container
+ taskRef:
+ params:
+@@ -282,10 +280,8 @@ spec:
+ - name: workspace
+ workspace: workspace
+ - name: deprecated-base-image-check
+ params:
+- - name: BASE_IMAGES_DIGESTS
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ - name: IMAGE_URL
+ value: $(tasks.build-container.results.IMAGE_URL)
+ - name: IMAGE_DIGEST
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
+```
diff --git a/task/buildah-6gb/0.2/kustomization.yaml b/task/buildah-6gb/0.2/kustomization.yaml
new file mode 100644
index 0000000000..2c6158898f
--- /dev/null
+++ b/task/buildah-6gb/0.2/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../buildah/0.2
+
+patches:
+- path: patch.yaml
+ target:
+ kind: Task
diff --git a/task/buildah-6gb/0.2/patch.yaml b/task/buildah-6gb/0.2/patch.yaml
new file mode 100644
index 0000000000..63b1281bba
--- /dev/null
+++ b/task/buildah-6gb/0.2/patch.yaml
@@ -0,0 +1,9 @@
+- op: replace
+ path: /metadata/name
+ value: buildah-6gb
+- op: replace
+ path: /spec/steps/0/computeResources/limits/memory
+ value: 6Gi
+- op: replace
+ path: /spec/steps/0/computeResources/requests/memory
+ value: 4Gi
diff --git a/task/buildah-8gb/0.2/MIGRATION.md b/task/buildah-8gb/0.2/MIGRATION.md
new file mode 100644
index 0000000000..f0a1a0ad06
--- /dev/null
+++ b/task/buildah-8gb/0.2/MIGRATION.md
@@ -0,0 +1,46 @@
+# Migration from 0.1 to 0.2
+
+Version 0.2:
+
+* Removes the `BASE_IMAGES_DIGESTS` result. Please remove all references to this
+  result from your pipeline.
+  * Base images and their digests can be found in the SBOM for the output image
+    (see the sketch after this list).
+* No longer writes the `base_images_from_dockerfile` file into the `source` workspace.
+
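+For reference, here is a minimal sketch of how the attached SBOM could be fetched and
+inspected locally. It assumes `cosign` and `jq` are available and uses a placeholder
+image reference; where exactly the base-image entries appear depends on the SBOM format.
+
+```bash
+# Placeholder reference: substitute the IMAGE_URL and IMAGE_DIGEST results of your build.
+IMAGE="quay.io/your-org/your-image@sha256:<digest>"
+
+# The buildah task attaches the CycloneDX SBOM to the image with cosign,
+# so it can be downloaded the same way and then explored with jq.
+cosign download sbom "$IMAGE" > sbom-cyclonedx.json
+jq 'keys' sbom-cyclonedx.json  # list the top-level sections of the SBOM
+```
+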
+## Konflux-specific
+
+In a typical Konflux pipeline, the two tasks that used to depend on the `BASE_IMAGES_DIGESTS`
+result are `build-source-image` and `deprecated-base-image-check`.
+
+1. Make sure your version of `deprecated-base-image-check` is at least `0.4`.
+2. Make sure your version of `build-source-image` supports reading base images from
+   the SBOM. Version `0.1` has supported it since 2024-07-15. In the logs of your build
+   pipeline, you should see that the build-source-image task now has a GET-BASE-IMAGES
+   step. Once you stop passing the `BASE_IMAGES` param (which referenced the
+   `BASE_IMAGES_DIGESTS` result), this step will emit logs about handling the SBOM.
+3. Remove the parameters that reference the `BASE_IMAGES_DIGESTS` result:
+
+```diff
+@@ -255,10 +255,8 @@ spec:
+ - name: build-source-image
+ params:
+ - name: BINARY_IMAGE
+ value: $(params.output-image)
+- - name: BASE_IMAGES
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ runAfter:
+ - build-container
+ taskRef:
+ params:
+@@ -282,10 +280,8 @@ spec:
+ - name: workspace
+ workspace: workspace
+ - name: deprecated-base-image-check
+ params:
+- - name: BASE_IMAGES_DIGESTS
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ - name: IMAGE_URL
+ value: $(tasks.build-container.results.IMAGE_URL)
+ - name: IMAGE_DIGEST
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
+```
diff --git a/task/buildah-8gb/0.2/kustomization.yaml b/task/buildah-8gb/0.2/kustomization.yaml
new file mode 100644
index 0000000000..2c6158898f
--- /dev/null
+++ b/task/buildah-8gb/0.2/kustomization.yaml
@@ -0,0 +1,10 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../buildah/0.2
+
+patches:
+- path: patch.yaml
+ target:
+ kind: Task
diff --git a/task/buildah-8gb/0.2/patch.yaml b/task/buildah-8gb/0.2/patch.yaml
new file mode 100644
index 0000000000..58a3061956
--- /dev/null
+++ b/task/buildah-8gb/0.2/patch.yaml
@@ -0,0 +1,9 @@
+- op: replace
+ path: /metadata/name
+ value: buildah-8gb
+- op: replace
+ path: /spec/steps/0/computeResources/limits/memory
+ value: 8Gi
+- op: replace
+ path: /spec/steps/0/computeResources/requests/memory
+ value: 6Gi
diff --git a/task/buildah-oci-ta/0.1/README.md b/task/buildah-oci-ta/0.1/README.md
index 6cb12c142b..6e96867d20 100644
--- a/task/buildah-oci-ta/0.1/README.md
+++ b/task/buildah-oci-ta/0.1/README.md
@@ -8,6 +8,7 @@ When prefetch-dependencies task was activated it is using its artifacts to run b
## Parameters
|name|description|default value|required|
|---|---|---|---|
+|ACTIVATION_KEY|Name of secret which contains subscription activation key|activation-key|false|
|ADDITIONAL_SECRET|Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET|does-not-exist|false|
|ADD_CAPABILITIES|Comma separated list of extra capabilities to add when running 'buildah build'|""|false|
|BUILD_ARGS|Array of --build-arg values ("arg=value" strings)|[]|false|
diff --git a/task/buildah-oci-ta/0.1/buildah-oci-ta.yaml b/task/buildah-oci-ta/0.1/buildah-oci-ta.yaml
index e63c334f0a..5320a52d3c 100644
--- a/task/buildah-oci-ta/0.1/buildah-oci-ta.yaml
+++ b/task/buildah-oci-ta/0.1/buildah-oci-ta.yaml
@@ -16,6 +16,10 @@ spec:
When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts.
When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment.
params:
+ - name: ACTIVATION_KEY
+ description: Name of secret which contains subscription activation key
+ type: string
+ default: activation-key
- name: ADDITIONAL_SECRET
description: Name of a secret which will be made available to the build
with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET
@@ -140,6 +144,10 @@ spec:
description: The counting of Java components by publisher in JSON format
type: string
volumes:
+ - name: activation-key
+ secret:
+ optional: true
+ secretName: $(params.ACTIVATION_KEY)
- name: additional-secret
secret:
optional: true
@@ -163,6 +171,8 @@ spec:
emptyDir: {}
stepTemplate:
env:
+ - name: ACTIVATION_KEY
+ value: $(params.ACTIVATION_KEY)
- name: ADDITIONAL_SECRET
value: $(params.ADDITIONAL_SECRET)
- name: ADD_CAPABILITIES
@@ -221,6 +231,8 @@ spec:
name: varlibcontainers
- mountPath: /entitlement
name: etc-pki-entitlement
+ - mountPath: /activation-key
+ name: activation-key
- mountPath: /additional-secret
name: additional-secret
- mountPath: /mnt/trusted-ca
@@ -353,6 +365,13 @@ spec:
echo "Adding the entitlement to the build"
fi
+ ACTIVATION_KEY_PATH="/activation-key"
+ if [ -d "$ACTIVATION_KEY_PATH" ]; then
+ cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/activation-key:/activation-key"
+ echo "Adding activation key to the build"
+ fi
+
ADDITIONAL_SECRET_PATH="/additional-secret"
ADDITIONAL_SECRET_TMP="/tmp/additional-secret"
if [ -d "$ADDITIONAL_SECRET_PATH" ]; then
diff --git a/task/buildah-oci-ta/0.2/MIGRATION.md b/task/buildah-oci-ta/0.2/MIGRATION.md
new file mode 100644
index 0000000000..d6812093cc
--- /dev/null
+++ b/task/buildah-oci-ta/0.2/MIGRATION.md
@@ -0,0 +1,48 @@
+# Migration from 0.1 to 0.2
+
+Version 0.2:
+
+* Removes the `BASE_IMAGES_DIGESTS` result. Please remove all references to this
+  result from your pipeline.
+  * Base images and their digests can be found in the SBOM for the output image
+    (see the sketch after this list).
+* Removes the `DOCKER_AUTH` param. It was already a no-op in the later releases of
+  version 0.1, so please stop passing it to the buildah task if you still do.
+
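+For reference, here is a minimal sketch of how the attached SBOM could be fetched and
+inspected locally. It assumes `cosign` and `jq` are available and uses a placeholder
+image reference; where exactly the base-image entries appear depends on the SBOM format.
+
+```bash
+# Placeholder reference: substitute the IMAGE_URL and IMAGE_DIGEST results of your build.
+IMAGE="quay.io/your-org/your-image@sha256:<digest>"
+
+# The buildah task attaches the CycloneDX SBOM to the image with cosign,
+# so it can be downloaded the same way and then explored with jq.
+cosign download sbom "$IMAGE" > sbom-cyclonedx.json
+jq 'keys' sbom-cyclonedx.json  # list the top-level sections of the SBOM
+```
+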
+## Konflux-specific
+
+In a typical Konflux pipeline, the two tasks that used to depend on the `BASE_IMAGES_DIGESTS`
+result are `build-source-image` and `deprecated-base-image-check`.
+
+1. Make sure your version of `deprecated-base-image-check` is at least `0.4`.
+2. Make sure your version of `build-source-image` supports reading base images from
+   the SBOM. Version `0.1` has supported it since 2024-07-15. In the logs of your build
+   pipeline, you should see that the build-source-image task now has a GET-BASE-IMAGES
+   step. Once you stop passing the `BASE_IMAGES` param (which referenced the
+   `BASE_IMAGES_DIGESTS` result), this step will emit logs about handling the SBOM.
+3. Remove the parameters that reference the `BASE_IMAGES_DIGESTS` result:
+
+```diff
+@@ -255,10 +255,8 @@ spec:
+ - name: build-source-image
+ params:
+ - name: BINARY_IMAGE
+ value: $(params.output-image)
+- - name: BASE_IMAGES
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ runAfter:
+ - build-container
+ taskRef:
+ params:
+@@ -282,10 +280,8 @@ spec:
+ - name: workspace
+ workspace: workspace
+ - name: deprecated-base-image-check
+ params:
+- - name: BASE_IMAGES_DIGESTS
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ - name: IMAGE_URL
+ value: $(tasks.build-container.results.IMAGE_URL)
+ - name: IMAGE_DIGEST
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
+```
diff --git a/task/buildah-oci-ta/0.2/README.md b/task/buildah-oci-ta/0.2/README.md
new file mode 100644
index 0000000000..532e96b0c1
--- /dev/null
+++ b/task/buildah-oci-ta/0.2/README.md
@@ -0,0 +1,44 @@
+# buildah-oci-ta task
+
+Buildah task builds source code into a container image and pushes the image into container registry using buildah tool.
+In addition it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool.
+When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts.
+When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment.
+
+## Parameters
+|name|description|default value|required|
+|---|---|---|---|
+|ACTIVATION_KEY|Name of secret which contains subscription activation key|activation-key|false|
+|ADDITIONAL_SECRET|Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET|does-not-exist|false|
+|ADD_CAPABILITIES|Comma separated list of extra capabilities to add when running 'buildah build'|""|false|
+|BUILD_ARGS|Array of --build-arg values ("arg=value" strings)|[]|false|
+|BUILD_ARGS_FILE|Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file|""|false|
+|CACHI2_ARTIFACT|The Trusted Artifact URI pointing to the artifact with the prefetched dependencies.|""|false|
+|COMMIT_SHA|The image is built from this commit.|""|false|
+|CONTEXT|Path to the directory to use as context.|.|false|
+|DOCKERFILE|Path to the Dockerfile to build.|./Dockerfile|false|
+|ENTITLEMENT_SECRET|Name of secret which contains the entitlement certificates|etc-pki-entitlement|false|
+|HERMETIC|Determines if build will be executed without network access.|false|false|
+|IMAGE|Reference of the image buildah will produce.||true|
+|IMAGE_EXPIRES_AFTER|Delete image tag after specified time. Empty means to keep the image tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.|""|false|
+|PREFETCH_INPUT|In case it is not empty, the prefetched content should be made available to the build.|""|false|
+|SKIP_UNUSED_STAGES|Whether to skip stages in Containerfile that seem unused by subsequent stages|true|false|
+|SOURCE_ARTIFACT|The Trusted Artifact URI pointing to the artifact with the application source code.||true|
+|SQUASH|Squash all new and previous layers added as a part of this build, as per --squash|false|false|
+|STORAGE_DRIVER|Storage driver to configure for buildah|vfs|false|
+|TARGET_STAGE|Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage.|""|false|
+|TLSVERIFY|Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)|true|false|
+|YUM_REPOS_D_FETCHED|Path in source workspace where dynamically-fetched repos are present|fetched.repos.d|false|
+|YUM_REPOS_D_SRC|Path in the git repository in which yum repository files are stored|repos.d|false|
+|YUM_REPOS_D_TARGET|Target path on the container in which yum repository files should be made available|/etc/yum.repos.d|false|
+|caTrustConfigMapKey|The name of the key in the ConfigMap that contains the CA bundle data.|ca-bundle.crt|false|
+|caTrustConfigMapName|The name of the ConfigMap to read CA bundle data from.|trusted-ca|false|
+
+## Results
+|name|description|
+|---|---|
+|IMAGE_DIGEST|Digest of the image just built|
+|IMAGE_URL|Image repository where the built image was pushed|
+|JAVA_COMMUNITY_DEPENDENCIES|The Java dependencies that came from community sources such as Maven central.|
+|SBOM_JAVA_COMPONENTS_COUNT|The counting of Java components by publisher in JSON format|
+
diff --git a/task/buildah-oci-ta/0.2/buildah-oci-ta.yaml b/task/buildah-oci-ta/0.2/buildah-oci-ta.yaml
new file mode 100644
index 0000000000..02deaefda8
--- /dev/null
+++ b/task/buildah-oci-ta/0.2/buildah-oci-ta.yaml
@@ -0,0 +1,572 @@
+---
+apiVersion: tekton.dev/v1
+kind: Task
+metadata:
+ name: buildah-oci-ta
+ annotations:
+ tekton.dev/pipelines.minVersion: 0.12.1
+ tekton.dev/tags: image-build, konflux
+ labels:
+ app.kubernetes.io/version: "0.2"
+ build.appstudio.redhat.com/build_type: docker
+spec:
+ description: |-
+ Buildah task builds source code into a container image and pushes the image into container registry using buildah tool.
+ In addition it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool.
+ When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts.
+ When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment.
+ params:
+ - name: ACTIVATION_KEY
+ description: Name of secret which contains subscription activation key
+ type: string
+ default: activation-key
+ - name: ADDITIONAL_SECRET
+ description: Name of a secret which will be made available to the build
+ with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET
+ type: string
+ default: does-not-exist
+ - name: ADD_CAPABILITIES
+ description: Comma separated list of extra capabilities to add when
+ running 'buildah build'
+ type: string
+ default: ""
+ - name: BUILD_ARGS
+ description: Array of --build-arg values ("arg=value" strings)
+ type: array
+ default: []
+ - name: BUILD_ARGS_FILE
+ description: Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file
+ type: string
+ default: ""
+ - name: CACHI2_ARTIFACT
+ description: The Trusted Artifact URI pointing to the artifact with
+ the prefetched dependencies.
+ type: string
+ default: ""
+ - name: COMMIT_SHA
+ description: The image is built from this commit.
+ type: string
+ default: ""
+ - name: CONTEXT
+ description: Path to the directory to use as context.
+ type: string
+ default: .
+ - name: DOCKERFILE
+ description: Path to the Dockerfile to build.
+ type: string
+ default: ./Dockerfile
+ - name: ENTITLEMENT_SECRET
+ description: Name of secret which contains the entitlement certificates
+ type: string
+ default: etc-pki-entitlement
+ - name: HERMETIC
+ description: Determines if build will be executed without network access.
+ type: string
+ default: "false"
+ - name: IMAGE
+ description: Reference of the image buildah will produce.
+ type: string
+ - name: IMAGE_EXPIRES_AFTER
+ description: Delete image tag after specified time. Empty means to keep
+ the image tag. Time values could be something like 1h, 2d, 3w for
+ hours, days, and weeks, respectively.
+ type: string
+ default: ""
+ - name: PREFETCH_INPUT
+ description: In case it is not empty, the prefetched content should
+ be made available to the build.
+ type: string
+ default: ""
+ - name: SKIP_UNUSED_STAGES
+ description: Whether to skip stages in Containerfile that seem unused
+ by subsequent stages
+ type: string
+ default: "true"
+ - name: SOURCE_ARTIFACT
+ description: The Trusted Artifact URI pointing to the artifact with
+ the application source code.
+ type: string
+ - name: SQUASH
+ description: Squash all new and previous layers added as a part of this
+ build, as per --squash
+ type: string
+ default: "false"
+ - name: STORAGE_DRIVER
+ description: Storage driver to configure for buildah
+ type: string
+ default: vfs
+ - name: TARGET_STAGE
+ description: Target stage in Dockerfile to build. If not specified,
+ the Dockerfile is processed entirely to (and including) its last stage.
+ type: string
+ default: ""
+ - name: TLSVERIFY
+ description: Verify the TLS on the registry endpoint (for push/pull
+ to a non-TLS registry)
+ type: string
+ default: "true"
+ - name: YUM_REPOS_D_FETCHED
+ description: Path in source workspace where dynamically-fetched repos
+ are present
+ default: fetched.repos.d
+ - name: YUM_REPOS_D_SRC
+ description: Path in the git repository in which yum repository files
+ are stored
+ default: repos.d
+ - name: YUM_REPOS_D_TARGET
+ description: Target path on the container in which yum repository files
+ should be made available
+ default: /etc/yum.repos.d
+ - name: caTrustConfigMapKey
+ description: The name of the key in the ConfigMap that contains the
+ CA bundle data.
+ type: string
+ default: ca-bundle.crt
+ - name: caTrustConfigMapName
+ description: The name of the ConfigMap to read CA bundle data from.
+ type: string
+ default: trusted-ca
+ results:
+ - name: IMAGE_DIGEST
+ description: Digest of the image just built
+ - name: IMAGE_URL
+ description: Image repository where the built image was pushed
+ - name: JAVA_COMMUNITY_DEPENDENCIES
+ description: The Java dependencies that came from community sources
+ such as Maven central.
+ - name: SBOM_JAVA_COMPONENTS_COUNT
+ description: The counting of Java components by publisher in JSON format
+ type: string
+ volumes:
+ - name: activation-key
+ secret:
+ optional: true
+ secretName: $(params.ACTIVATION_KEY)
+ - name: additional-secret
+ secret:
+ optional: true
+ secretName: $(params.ADDITIONAL_SECRET)
+ - name: etc-pki-entitlement
+ secret:
+ optional: true
+ secretName: $(params.ENTITLEMENT_SECRET)
+ - name: shared
+ emptyDir: {}
+ - name: trusted-ca
+ configMap:
+ items:
+ - key: $(params.caTrustConfigMapKey)
+ path: ca-bundle.crt
+ name: $(params.caTrustConfigMapName)
+ optional: true
+ - name: varlibcontainers
+ emptyDir: {}
+ - name: workdir
+ emptyDir: {}
+ stepTemplate:
+ env:
+ - name: ACTIVATION_KEY
+ value: $(params.ACTIVATION_KEY)
+ - name: ADDITIONAL_SECRET
+ value: $(params.ADDITIONAL_SECRET)
+ - name: ADD_CAPABILITIES
+ value: $(params.ADD_CAPABILITIES)
+ - name: BUILDAH_FORMAT
+ value: oci
+ - name: BUILD_ARGS_FILE
+ value: $(params.BUILD_ARGS_FILE)
+ - name: CONTEXT
+ value: $(params.CONTEXT)
+ - name: DOCKERFILE
+ value: $(params.DOCKERFILE)
+ - name: ENTITLEMENT_SECRET
+ value: $(params.ENTITLEMENT_SECRET)
+ - name: HERMETIC
+ value: $(params.HERMETIC)
+ - name: IMAGE
+ value: $(params.IMAGE)
+ - name: IMAGE_EXPIRES_AFTER
+ value: $(params.IMAGE_EXPIRES_AFTER)
+ - name: SKIP_UNUSED_STAGES
+ value: $(params.SKIP_UNUSED_STAGES)
+ - name: SQUASH
+ value: $(params.SQUASH)
+ - name: STORAGE_DRIVER
+ value: $(params.STORAGE_DRIVER)
+ - name: TARGET_STAGE
+ value: $(params.TARGET_STAGE)
+ - name: TLSVERIFY
+ value: $(params.TLSVERIFY)
+ - name: YUM_REPOS_D_FETCHED
+ value: $(params.YUM_REPOS_D_FETCHED)
+ - name: YUM_REPOS_D_SRC
+ value: $(params.YUM_REPOS_D_SRC)
+ - name: YUM_REPOS_D_TARGET
+ value: $(params.YUM_REPOS_D_TARGET)
+ volumeMounts:
+ - mountPath: /shared
+ name: shared
+ - mountPath: /var/workdir
+ name: workdir
+ steps:
+ - name: use-trusted-artifact
+ image: quay.io/redhat-appstudio/build-trusted-artifacts:latest@sha256:bf4bfae950fe31d08f44488bb788bea8800cd6d75f5e09fcc21cf98689c61185
+ args:
+ - use
+ - $(params.SOURCE_ARTIFACT)=/var/workdir/source
+ - $(params.CACHI2_ARTIFACT)=/var/workdir/cachi2
+ - name: build
+ image: quay.io/konflux-ci/buildah:latest@sha256:9ef792d74bcc1d330de6be58b61f2cdbfa1c23b74a291eb2136ffd452d373050
+ args:
+ - $(params.BUILD_ARGS[*])
+ workingDir: /var/workdir
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /entitlement
+ name: etc-pki-entitlement
+ - mountPath: /activation-key
+ name: activation-key
+ - mountPath: /additional-secret
+ name: additional-secret
+ - mountPath: /mnt/trusted-ca
+ name: trusted-ca
+ readOnly: true
+ env:
+ - name: COMMIT_SHA
+ value: $(params.COMMIT_SHA)
+ script: |
+ ca_bundle=/mnt/trusted-ca/ca-bundle.crt
+ if [ -f "$ca_bundle" ]; then
+ echo "INFO: Using mounted CA bundle: $ca_bundle"
+ cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors
+ update-ca-trust
+ fi
+
+ SOURCE_CODE_DIR=source
+ if [ -e "$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" ]; then
+ dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE"
+ elif [ -e "$SOURCE_CODE_DIR/$DOCKERFILE" ]; then
+ dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$DOCKERFILE"
+ elif echo "$DOCKERFILE" | grep -q "^https\?://"; then
+ echo "Fetch Dockerfile from $DOCKERFILE"
+ dockerfile_path=$(mktemp --suffix=-Dockerfile)
+ http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path" "$DOCKERFILE")
+ if [ $http_code != 200 ]; then
+ echo "No Dockerfile is fetched. Server responds $http_code"
+ exit 1
+ fi
+ http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path.dockerignore.tmp" "$DOCKERFILE.dockerignore")
+ if [ $http_code = 200 ]; then
+ echo "Fetched .dockerignore from $DOCKERFILE.dockerignore"
+ mv "$dockerfile_path.dockerignore.tmp" $SOURCE_CODE_DIR/$CONTEXT/.dockerignore
+ fi
+ else
+ echo "Cannot find Dockerfile $DOCKERFILE"
+ exit 1
+ fi
+ if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] && grep -q '^\s*RUN \(./\)\?mvn' "$dockerfile_path"; then
+          sed -i -e "s|^\s*RUN \(\(./\)\?mvn\)\(.*\)|RUN echo \"mirror.default=http://$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR/v1/cache/default/0/*\" > /tmp/settings.yaml; \1 -s /tmp/settings.yaml \3|g" "$dockerfile_path"
+ touch /var/lib/containers/java
+ fi
+
+ # Fixing group permission on /var/lib/containers
+ chown root:root /var/lib/containers
+
+ sed -i 's/^\s*short-name-mode\s*=\s*.*/short-name-mode = "disabled"/' /etc/containers/registries.conf
+
+ # Setting new namespace to run buildah - 2^32-2
+ echo 'root:1:4294967294' | tee -a /etc/subuid >>/etc/subgid
+
+ BUILDAH_ARGS=()
+
+ BASE_IMAGES=$(grep -i '^\s*FROM' "$dockerfile_path" | sed 's/--platform=\S*//' | awk '{print $2}' | (grep -v ^oci-archive: || true))
+ if [ "${HERMETIC}" == "true" ]; then
+ BUILDAH_ARGS+=("--pull=never")
+ UNSHARE_ARGS="--net"
+ for image in $BASE_IMAGES; do
+ if [ "${image}" != "scratch" ]; then
+ unshare -Ufp --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -- buildah pull $image
+ fi
+ done
+ echo "Build will be executed with network isolation"
+ fi
+
+ if [ -n "${TARGET_STAGE}" ]; then
+ BUILDAH_ARGS+=("--target=${TARGET_STAGE}")
+ fi
+
+ if [ -n "${BUILD_ARGS_FILE}" ]; then
+ BUILDAH_ARGS+=("--build-arg-file=$(pwd)/$SOURCE_CODE_DIR/${BUILD_ARGS_FILE}")
+ fi
+
+ for build_arg in "$@"; do
+ BUILDAH_ARGS+=("--build-arg=$build_arg")
+ done
+
+ if [ -n "${ADD_CAPABILITIES}" ]; then
+ BUILDAH_ARGS+=("--cap-add=${ADD_CAPABILITIES}")
+ fi
+
+ if [ "${SQUASH}" == "true" ]; then
+ BUILDAH_ARGS+=("--squash")
+ fi
+
+ if [ "${SKIP_UNUSED_STAGES}" != "true" ]; then
+ BUILDAH_ARGS+=("--skip-unused-stages=false")
+ fi
+
+ if [ -f "/var/workdir/cachi2/cachi2.env" ]; then
+ cp -r "/var/workdir/cachi2" /tmp/
+ chmod -R go+rwX /tmp/cachi2
+ VOLUME_MOUNTS="--volume /tmp/cachi2:/cachi2"
+ sed -i 's|^\s*run |RUN . /cachi2/cachi2.env \&\& \\\n |i' "$dockerfile_path"
+ echo "Prefetched content will be made available"
+
+ prefetched_repo_for_my_arch="/tmp/cachi2/output/deps/rpm/$(uname -m)/repos.d/cachi2.repo"
+ if [ -f "$prefetched_repo_for_my_arch" ]; then
+ echo "Adding $prefetched_repo_for_my_arch to $YUM_REPOS_D_FETCHED"
+ mkdir -p "$YUM_REPOS_D_FETCHED"
+ cp --no-clobber "$prefetched_repo_for_my_arch" "$YUM_REPOS_D_FETCHED"
+ fi
+ fi
+
+ # if yum repofiles stored in git, copy them to mount point outside the source dir
+ if [ -d "${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}" ]; then
+ mkdir -p ${YUM_REPOS_D_FETCHED}
+ cp -r ${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}/* ${YUM_REPOS_D_FETCHED}
+ fi
+
+ # if anything in the repofiles mount point (either fetched or from git), mount it
+ if [ -d "${YUM_REPOS_D_FETCHED}" ]; then
+ chmod -R go+rwX ${YUM_REPOS_D_FETCHED}
+ mount_point=$(realpath ${YUM_REPOS_D_FETCHED})
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume ${mount_point}:${YUM_REPOS_D_TARGET}"
+ fi
+
+ LABELS=(
+ "--label" "build-date=$(date -u +'%Y-%m-%dT%H:%M:%S')"
+ "--label" "architecture=$(uname -m)"
+ "--label" "vcs-type=git"
+ )
+ [ -n "$COMMIT_SHA" ] && LABELS+=("--label" "vcs-ref=$COMMIT_SHA")
+ [ -n "$IMAGE_EXPIRES_AFTER" ] && LABELS+=("--label" "quay.expires-after=$IMAGE_EXPIRES_AFTER")
+
+ ENTITLEMENT_PATH="/entitlement"
+ if [ -d "$ENTITLEMENT_PATH" ]; then
+ cp -r --preserve=mode "$ENTITLEMENT_PATH" /tmp/entitlement
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/entitlement:/etc/pki/entitlement"
+ echo "Adding the entitlement to the build"
+ fi
+
+ ACTIVATION_KEY_PATH="/activation-key"
+ if [ -d "$ACTIVATION_KEY_PATH" ]; then
+ cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/activation-key:/activation-key"
+ echo "Adding activation key to the build"
+ fi
+
+ ADDITIONAL_SECRET_PATH="/additional-secret"
+ ADDITIONAL_SECRET_TMP="/tmp/additional-secret"
+ if [ -d "$ADDITIONAL_SECRET_PATH" ]; then
+ cp -r --preserve=mode -L "$ADDITIONAL_SECRET_PATH" $ADDITIONAL_SECRET_TMP
+ while read -r filename; do
+ echo "Adding the secret ${ADDITIONAL_SECRET}/${filename} to the build, available at /run/secrets/${ADDITIONAL_SECRET}/${filename}"
+ BUILDAH_ARGS+=("--secret=id=${ADDITIONAL_SECRET}/${filename},src=$ADDITIONAL_SECRET_TMP/${filename}")
+ done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;)
+ fi
+
+ unshare -Uf $UNSHARE_ARGS --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w ${SOURCE_CODE_DIR}/$CONTEXT -- buildah build \
+ $VOLUME_MOUNTS \
+ "${BUILDAH_ARGS[@]}" \
+ "${LABELS[@]}" \
+ --tls-verify=$TLSVERIFY --no-cache \
+ --ulimit nofile=4096:4096 \
+ -f "$dockerfile_path" -t $IMAGE .
+
+ container=$(buildah from --pull-never $IMAGE)
+ buildah mount $container | tee /shared/container_path
+ echo $container >/shared/container_name
+
+ # Save the SBOM produced by Cachi2 so it can be merged into the final SBOM later
+ if [ -f "/tmp/cachi2/output/bom.json" ]; then
+ cp /tmp/cachi2/output/bom.json ./sbom-cachi2.json
+ fi
+
+ for image in $BASE_IMAGES; do
+ if [ "${image}" != "scratch" ]; then
+ buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image" >>/shared/base_images_digests
+ fi
+ done
+
+ # Needed to generate base images SBOM
+ echo "$BASE_IMAGES" >/shared/base_images_from_dockerfile
+ computeResources:
+ limits:
+ memory: 4Gi
+ requests:
+ cpu: 250m
+ memory: 512Mi
+ securityContext:
+ capabilities:
+ add:
+ - SETFCAP
+ - name: sbom-syft-generate
+ image: quay.io/redhat-appstudio/syft:v0.105.1@sha256:1910b829997650c696881e5fc2fc654ddf3184c27edb1b2024e9cb2ba51ac431
+ workingDir: /var/workdir/source
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /shared
+ name: shared
+ script: |
+ echo "Running syft on the source directory"
+ syft dir:/var/workdir/source --output cyclonedx-json=/var/workdir/sbom-source.json
+ find $(cat /shared/container_path) -xtype l -delete
+ echo "Running syft on the image filesystem"
+ syft dir:$(cat /shared/container_path) --output cyclonedx-json=/var/workdir/sbom-image.json
+ - name: analyse-dependencies-java-sbom
+ image: quay.io/redhat-appstudio/hacbs-jvm-build-request-processor:127ee0c223a2b56a9bd20a6f2eaeed3bd6015f77
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /shared
+ name: shared
+ script: |
+ if [ -f /var/lib/containers/java ]; then
+ /opt/jboss/container/java/run/run-java.sh analyse-dependencies path $(cat /shared/container_path) -s /var/workdir/sbom-image.json --task-run-name $(context.taskRun.name) --publishers $(results.SBOM_JAVA_COMPONENTS_COUNT.path)
+ sed -i 's/^/ /' $(results.SBOM_JAVA_COMPONENTS_COUNT.path) # Workaround for SRVKP-2875
+ else
+ touch $(results.JAVA_COMMUNITY_DEPENDENCIES.path)
+ fi
+ securityContext:
+ runAsUser: 0
+ - name: merge-syft-sboms
+ image: registry.access.redhat.com/ubi9/python-39:1-172.1712567222@sha256:c96f839e927c52990143df4efb2872946fcd5de9e1ed2014947bb2cf3084c27a
+ workingDir: /var/workdir
+ script: |
+ #!/bin/python3
+ import json
+
+ # load SBOMs
+ with open("./sbom-image.json") as f:
+ image_sbom = json.load(f)
+
+ with open("./sbom-source.json") as f:
+ source_sbom = json.load(f)
+
+ # fetch unique components from available SBOMs
+ def get_identifier(component):
+ return component["name"] + '@' + component.get("version", "")
+
+ image_sbom_components = image_sbom.setdefault("components", [])
+ existing_components = [get_identifier(component) for component in image_sbom_components]
+
+ source_sbom_components = source_sbom.get("components", [])
+ for component in source_sbom_components:
+ if get_identifier(component) not in existing_components:
+ image_sbom_components.append(component)
+ existing_components.append(get_identifier(component))
+
+ image_sbom_components.sort(key=lambda c: get_identifier(c))
+
+ # write the CycloneDX unified SBOM
+ with open("./sbom-cyclonedx.json", "w") as f:
+ json.dump(image_sbom, f, indent=4)
+ securityContext:
+ runAsUser: 0
+ - name: merge-cachi2-sbom
+ image: quay.io/redhat-appstudio/cachi2:0.8.0@sha256:5cf15d6f3fb151a3e12c8a17024062b7cc62b0c3e1b165e4a9fa5bf7a77bdc30
+ workingDir: /var/workdir
+ script: |
+ if [ -f "sbom-cachi2.json" ]; then
+ echo "Merging contents of sbom-cachi2.json into sbom-cyclonedx.json"
+ /src/utils/merge_syft_sbom.py sbom-cachi2.json sbom-cyclonedx.json >sbom-temp.json
+ mv sbom-temp.json sbom-cyclonedx.json
+ else
+ echo "Skipping step since no Cachi2 SBOM was produced"
+ fi
+ securityContext:
+ runAsUser: 0
+ - name: create-purl-sbom
+ image: registry.access.redhat.com/ubi9/python-39:1-172.1712567222@sha256:c96f839e927c52990143df4efb2872946fcd5de9e1ed2014947bb2cf3084c27a
+ workingDir: /var/workdir
+ script: |
+ #!/bin/python3
+ import json
+
+ with open("./sbom-cyclonedx.json") as f:
+ cyclonedx_sbom = json.load(f)
+
+ purls = [{"purl": component["purl"]} for component in cyclonedx_sbom.get("components", []) if "purl" in component]
+ purl_content = {"image_contents": {"dependencies": purls}}
+
+ with open("sbom-purl.json", "w") as output_file:
+ json.dump(purl_content, output_file, indent=4)
+ securityContext:
+ runAsUser: 0
+ - name: create-base-images-sbom
+ image: quay.io/redhat-appstudio/base-images-sbom-script@sha256:667669e3def018f9dbb8eaf8868887a40bc07842221e9a98f6787edcff021840
+ workingDir: /var/workdir
+ script: |
+ python3 /app/base_images_sbom_script.py \
+ --sbom=sbom-cyclonedx.json \
+ --base-images-from-dockerfile=/shared/base_images_from_dockerfile \
+ --base-images-digests=/shared/base_images_digests
+ securityContext:
+ runAsUser: 0
+ - name: inject-sbom-and-push
+ image: quay.io/konflux-ci/buildah:latest@sha256:9ef792d74bcc1d330de6be58b61f2cdbfa1c23b74a291eb2136ffd452d373050
+ workingDir: /var/workdir
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ script: |
+ base_image_name=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.name"}}' $IMAGE | cut -f1 -d'@')
+ base_image_digest=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.digest"}}' $IMAGE)
+ container=$(buildah from --pull-never $IMAGE)
+ buildah copy $container sbom-cyclonedx.json sbom-purl.json /root/buildinfo/content_manifests/
+ buildah config -a org.opencontainers.image.base.name=${base_image_name} -a org.opencontainers.image.base.digest=${base_image_digest} $container
+
+ BUILDAH_ARGS=()
+ if [ "${SQUASH}" == "true" ]; then
+ BUILDAH_ARGS+=("--squash")
+ fi
+
+ buildah commit "${BUILDAH_ARGS[@]}" $container $IMAGE
+
+ status=-1
+ max_run=5
+ sleep_sec=10
+ for run in $(seq 1 $max_run); do
+ status=0
+ [ "$run" -gt 1 ] && sleep $sleep_sec
+ echo "Pushing sbom image to registry"
+ buildah push \
+ --tls-verify=$TLSVERIFY \
+ --digestfile /var/workdir/image-digest $IMAGE \
+ docker://$IMAGE && break || status=$?
+ done
+ if [ "$status" -ne 0 ]; then
+ echo "Failed to push sbom image to registry after ${max_run} tries"
+ exit 1
+ fi
+
+ cat "/var/workdir"/image-digest | tee $(results.IMAGE_DIGEST.path)
+ echo -n "$IMAGE" | tee $(results.IMAGE_URL.path)
+ securityContext:
+ capabilities:
+ add:
+ - SETFCAP
+ runAsUser: 0
+ - name: upload-sbom
+ image: quay.io/redhat-appstudio/cosign:v2.1.1@sha256:c883d6f8d39148f2cea71bff4622d196d89df3e510f36c140c097b932f0dd5d5
+ args:
+ - attach
+ - sbom
+ - --sbom
+ - sbom-cyclonedx.json
+ - --type
+ - cyclonedx
+ - $(params.IMAGE)
+ workingDir: /var/workdir
diff --git a/task/buildah-oci-ta/0.2/recipe.yaml b/task/buildah-oci-ta/0.2/recipe.yaml
new file mode 100644
index 0000000000..4f84a9062d
--- /dev/null
+++ b/task/buildah-oci-ta/0.2/recipe.yaml
@@ -0,0 +1,18 @@
+---
+base: ../../buildah/0.2/buildah.yaml
+removeParams:
+ - BUILDER_IMAGE
+add:
+ - use-source
+ - use-cachi2
+removeWorkspaces:
+ - source
+replacements:
+ workspaces.source.path: /var/workdir
+regexReplacements:
+ "/workspace(/.*)": /var/workdir$1
+description: |-
+ Buildah task builds source code into a container image and pushes the image into container registry using buildah tool.
+ In addition it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool.
+ When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts.
+ When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment.
diff --git a/task/buildah-remote-oci-ta/0.1/buildah-remote-oci-ta.yaml b/task/buildah-remote-oci-ta/0.1/buildah-remote-oci-ta.yaml
index 6fce7565a2..559702b308 100644
--- a/task/buildah-remote-oci-ta/0.1/buildah-remote-oci-ta.yaml
+++ b/task/buildah-remote-oci-ta/0.1/buildah-remote-oci-ta.yaml
@@ -16,6 +16,10 @@ spec:
When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts.
When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment.
params:
+ - default: activation-key
+ description: Name of secret which contains subscription activation key
+ name: ACTIVATION_KEY
+ type: string
- default: does-not-exist
description: Name of a secret which will be made available to the build with 'buildah
build --secret' at /run/secrets/$ADDITIONAL_SECRET
@@ -143,6 +147,8 @@ spec:
stepTemplate:
computeResources: {}
env:
+ - name: ACTIVATION_KEY
+ value: $(params.ACTIVATION_KEY)
- name: ADDITIONAL_SECRET
value: $(params.ADDITIONAL_SECRET)
- name: ADD_CAPABILITIES
@@ -238,6 +244,7 @@ spec:
rsync -ra /shared/ "$SSH_HOST:$BUILD_DIR/volumes/shared/"
rsync -ra /var/workdir/ "$SSH_HOST:$BUILD_DIR/volumes/workdir/"
rsync -ra /entitlement/ "$SSH_HOST:$BUILD_DIR/volumes/etc-pki-entitlement/"
+ rsync -ra /activation-key/ "$SSH_HOST:$BUILD_DIR/volumes/activation-key/"
rsync -ra /additional-secret/ "$SSH_HOST:$BUILD_DIR/volumes/additional-secret/"
rsync -ra /mnt/trusted-ca/ "$SSH_HOST:$BUILD_DIR/volumes/trusted-ca/"
rsync -ra "$HOME/.docker/" "$SSH_HOST:$BUILD_DIR/.docker/"
@@ -370,6 +377,13 @@ spec:
echo "Adding the entitlement to the build"
fi
+ ACTIVATION_KEY_PATH="/activation-key"
+ if [ -d "$ACTIVATION_KEY_PATH" ]; then
+ cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/activation-key:/activation-key"
+ echo "Adding activation key to the build"
+ fi
+
ADDITIONAL_SECRET_PATH="/additional-secret"
ADDITIONAL_SECRET_TMP="/tmp/additional-secret"
if [ -d "$ADDITIONAL_SECRET_PATH" ]; then
@@ -413,6 +427,8 @@ spec:
chmod +x scripts/script-build.sh
rsync -ra scripts "$SSH_HOST:$BUILD_DIR"
ssh $SSH_ARGS "$SSH_HOST" $PORT_FORWARD podman run $PODMAN_PORT_FORWARD \
+ --tmpfs /run/secrets \
+ -e ACTIVATION_KEY="$ACTIVATION_KEY" \
-e ADDITIONAL_SECRET="$ADDITIONAL_SECRET" \
-e ADD_CAPABILITIES="$ADD_CAPABILITIES" \
-e BUILDAH_FORMAT="$BUILDAH_FORMAT" \
@@ -435,6 +451,7 @@ spec:
-v "$BUILD_DIR/volumes/shared:/shared:Z" \
-v "$BUILD_DIR/volumes/workdir:/var/workdir:Z" \
-v "$BUILD_DIR/volumes/etc-pki-entitlement:/entitlement:Z" \
+ -v "$BUILD_DIR/volumes/activation-key:/activation-key:Z" \
-v "$BUILD_DIR/volumes/additional-secret:/additional-secret:Z" \
-v "$BUILD_DIR/volumes/trusted-ca:/mnt/trusted-ca:Z" \
-v "$BUILD_DIR/.docker/:/root/.docker:Z" \
@@ -459,6 +476,8 @@ spec:
name: varlibcontainers
- mountPath: /entitlement
name: etc-pki-entitlement
+ - mountPath: /activation-key
+ name: activation-key
- mountPath: /additional-secret
name: additional-secret
- mountPath: /mnt/trusted-ca
@@ -636,6 +655,10 @@ spec:
name: upload-sbom
workingDir: /var/workdir
volumes:
+ - name: activation-key
+ secret:
+ optional: true
+ secretName: $(params.ACTIVATION_KEY)
- name: additional-secret
secret:
optional: true
diff --git a/task/buildah-remote-oci-ta/0.2/MIGRATION.md b/task/buildah-remote-oci-ta/0.2/MIGRATION.md
new file mode 100644
index 0000000000..d6812093cc
--- /dev/null
+++ b/task/buildah-remote-oci-ta/0.2/MIGRATION.md
@@ -0,0 +1,48 @@
+# Migration from 0.1 to 0.2
+
+Version 0.2:
+
+* Removes the `BASE_IMAGES_DIGESTS` result. Please remove all references to this
+  result from your pipeline.
+  * Base images and their digests can be found in the SBOM for the output image
+    (see the sketch after this list).
+* Removes the `DOCKER_AUTH` param. It was already a no-op in the later releases of
+  version 0.1, so please stop passing it to the buildah task if you still do.
+
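+For reference, here is a minimal sketch of how the attached SBOM could be fetched and
+inspected locally. It assumes `cosign` and `jq` are available and uses a placeholder
+image reference; where exactly the base-image entries appear depends on the SBOM format.
+
+```bash
+# Placeholder reference: substitute the IMAGE_URL and IMAGE_DIGEST results of your build.
+IMAGE="quay.io/your-org/your-image@sha256:<digest>"
+
+# The buildah task attaches the CycloneDX SBOM to the image with cosign,
+# so it can be downloaded the same way and then explored with jq.
+cosign download sbom "$IMAGE" > sbom-cyclonedx.json
+jq 'keys' sbom-cyclonedx.json  # list the top-level sections of the SBOM
+```
+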
+## Konflux-specific
+
+In a typical Konflux pipeline, the two tasks that used to depend on the `BASE_IMAGES_DIGESTS`
+result are `build-source-image` and `deprecated-base-image-check`.
+
+1. Make sure your version of `deprecated-base-image-check` is at least `0.4`.
+2. Make sure your version of `build-source-image` supports reading base images from
+   the SBOM. Version `0.1` has supported it since 2024-07-15. In the logs of your build
+   pipeline, you should see that the build-source-image task now has a GET-BASE-IMAGES
+   step. Once you stop passing the `BASE_IMAGES` param (which referenced the
+   `BASE_IMAGES_DIGESTS` result), this step will emit logs about handling the SBOM.
+3. Remove the parameters that reference the `BASE_IMAGES_DIGESTS` result:
+
+```diff
+@@ -255,10 +255,8 @@ spec:
+ - name: build-source-image
+ params:
+ - name: BINARY_IMAGE
+ value: $(params.output-image)
+- - name: BASE_IMAGES
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ runAfter:
+ - build-container
+ taskRef:
+ params:
+@@ -282,10 +280,8 @@ spec:
+ - name: workspace
+ workspace: workspace
+ - name: deprecated-base-image-check
+ params:
+- - name: BASE_IMAGES_DIGESTS
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ - name: IMAGE_URL
+ value: $(tasks.build-container.results.IMAGE_URL)
+ - name: IMAGE_DIGEST
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
+```
diff --git a/task/buildah-remote-oci-ta/0.2/buildah-remote-oci-ta.yaml b/task/buildah-remote-oci-ta/0.2/buildah-remote-oci-ta.yaml
new file mode 100644
index 0000000000..bc1c1ba014
--- /dev/null
+++ b/task/buildah-remote-oci-ta/0.2/buildah-remote-oci-ta.yaml
@@ -0,0 +1,678 @@
+apiVersion: tekton.dev/v1
+kind: Task
+metadata:
+ annotations:
+ tekton.dev/pipelines.minVersion: 0.12.1
+ tekton.dev/tags: image-build, konflux
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/version: "0.2"
+ build.appstudio.redhat.com/build_type: docker
+ name: buildah-remote-oci-ta
+spec:
+ description: |-
+ Buildah task builds source code into a container image and pushes the image into container registry using buildah tool.
+ In addition it generates a SBOM file, injects the SBOM file into final container image and pushes the SBOM file as separate image using cosign tool.
+ When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled it triggers rebuilds of Java artifacts.
+ When prefetch-dependencies task was activated it is using its artifacts to run build in hermetic environment.
+ params:
+ - default: activation-key
+ description: Name of secret which contains subscription activation key
+ name: ACTIVATION_KEY
+ type: string
+ - default: does-not-exist
+ description: Name of a secret which will be made available to the build with 'buildah
+ build --secret' at /run/secrets/$ADDITIONAL_SECRET
+ name: ADDITIONAL_SECRET
+ type: string
+ - default: ""
+ description: Comma separated list of extra capabilities to add when running 'buildah
+ build'
+ name: ADD_CAPABILITIES
+ type: string
+ - default: []
+ description: Array of --build-arg values ("arg=value" strings)
+ name: BUILD_ARGS
+ type: array
+ - default: ""
+ description: Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file
+ name: BUILD_ARGS_FILE
+ type: string
+ - default: ""
+ description: The Trusted Artifact URI pointing to the artifact with the prefetched
+ dependencies.
+ name: CACHI2_ARTIFACT
+ type: string
+ - default: ""
+ description: The image is built from this commit.
+ name: COMMIT_SHA
+ type: string
+ - default: .
+ description: Path to the directory to use as context.
+ name: CONTEXT
+ type: string
+ - default: ./Dockerfile
+ description: Path to the Dockerfile to build.
+ name: DOCKERFILE
+ type: string
+ - default: etc-pki-entitlement
+ description: Name of secret which contains the entitlement certificates
+ name: ENTITLEMENT_SECRET
+ type: string
+ - default: "false"
+ description: Determines if build will be executed without network access.
+ name: HERMETIC
+ type: string
+ - description: Reference of the image buildah will produce.
+ name: IMAGE
+ type: string
+ - default: ""
+ description: Delete image tag after specified time. Empty means to keep the image
+ tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks,
+ respectively.
+ name: IMAGE_EXPIRES_AFTER
+ type: string
+ - default: ""
+ description: In case it is not empty, the prefetched content should be made available
+ to the build.
+ name: PREFETCH_INPUT
+ type: string
+ - default: "true"
+ description: Whether to skip stages in Containerfile that seem unused by subsequent
+ stages
+ name: SKIP_UNUSED_STAGES
+ type: string
+ - description: The Trusted Artifact URI pointing to the artifact with the application
+ source code.
+ name: SOURCE_ARTIFACT
+ type: string
+ - default: "false"
+ description: Squash all new and previous layers added as a part of this build,
+ as per --squash
+ name: SQUASH
+ type: string
+ - default: vfs
+ description: Storage driver to configure for buildah
+ name: STORAGE_DRIVER
+ type: string
+ - default: ""
+ description: Target stage in Dockerfile to build. If not specified, the Dockerfile
+ is processed entirely to (and including) its last stage.
+ name: TARGET_STAGE
+ type: string
+ - default: "true"
+ description: Verify the TLS on the registry endpoint (for push/pull to a non-TLS
+ registry)
+ name: TLSVERIFY
+ type: string
+ - default: fetched.repos.d
+ description: Path in source workspace where dynamically-fetched repos are present
+ name: YUM_REPOS_D_FETCHED
+ - default: repos.d
+ description: Path in the git repository in which yum repository files are stored
+ name: YUM_REPOS_D_SRC
+ - default: /etc/yum.repos.d
+ description: Target path on the container in which yum repository files should
+ be made available
+ name: YUM_REPOS_D_TARGET
+ - default: ca-bundle.crt
+ description: The name of the key in the ConfigMap that contains the CA bundle
+ data.
+ name: caTrustConfigMapKey
+ type: string
+ - default: trusted-ca
+ description: The name of the ConfigMap to read CA bundle data from.
+ name: caTrustConfigMapName
+ type: string
+ - description: The platform to build on
+ name: PLATFORM
+ type: string
+ results:
+ - description: Digest of the image just built
+ name: IMAGE_DIGEST
+ - description: Image repository where the built image was pushed
+ name: IMAGE_URL
+ - description: The Java dependencies that came from community sources such as Maven
+ central.
+ name: JAVA_COMMUNITY_DEPENDENCIES
+ - description: The counting of Java components by publisher in JSON format
+ name: SBOM_JAVA_COMPONENTS_COUNT
+ type: string
+ stepTemplate:
+ computeResources: {}
+ env:
+ - name: ACTIVATION_KEY
+ value: $(params.ACTIVATION_KEY)
+ - name: ADDITIONAL_SECRET
+ value: $(params.ADDITIONAL_SECRET)
+ - name: ADD_CAPABILITIES
+ value: $(params.ADD_CAPABILITIES)
+ - name: BUILDAH_FORMAT
+ value: oci
+ - name: BUILD_ARGS_FILE
+ value: $(params.BUILD_ARGS_FILE)
+ - name: CONTEXT
+ value: $(params.CONTEXT)
+ - name: DOCKERFILE
+ value: $(params.DOCKERFILE)
+ - name: ENTITLEMENT_SECRET
+ value: $(params.ENTITLEMENT_SECRET)
+ - name: HERMETIC
+ value: $(params.HERMETIC)
+ - name: IMAGE
+ value: $(params.IMAGE)
+ - name: IMAGE_EXPIRES_AFTER
+ value: $(params.IMAGE_EXPIRES_AFTER)
+ - name: SKIP_UNUSED_STAGES
+ value: $(params.SKIP_UNUSED_STAGES)
+ - name: SQUASH
+ value: $(params.SQUASH)
+ - name: STORAGE_DRIVER
+ value: $(params.STORAGE_DRIVER)
+ - name: TARGET_STAGE
+ value: $(params.TARGET_STAGE)
+ - name: TLSVERIFY
+ value: $(params.TLSVERIFY)
+ - name: YUM_REPOS_D_FETCHED
+ value: $(params.YUM_REPOS_D_FETCHED)
+ - name: YUM_REPOS_D_SRC
+ value: $(params.YUM_REPOS_D_SRC)
+ - name: YUM_REPOS_D_TARGET
+ value: $(params.YUM_REPOS_D_TARGET)
+ - name: BUILDER_IMAGE
+ value: quay.io/konflux-ci/buildah:latest@sha256:9ef792d74bcc1d330de6be58b61f2cdbfa1c23b74a291eb2136ffd452d373050
+ volumeMounts:
+ - mountPath: /shared
+ name: shared
+ - mountPath: /var/workdir
+ name: workdir
+ steps:
+ - args:
+ - use
+ - $(params.SOURCE_ARTIFACT)=/var/workdir/source
+ - $(params.CACHI2_ARTIFACT)=/var/workdir/cachi2
+ computeResources: {}
+ image: quay.io/redhat-appstudio/build-trusted-artifacts:latest@sha256:bf4bfae950fe31d08f44488bb788bea8800cd6d75f5e09fcc21cf98689c61185
+ name: use-trusted-artifact
+ - args:
+ - $(params.BUILD_ARGS[*])
+ computeResources:
+ limits:
+ memory: 4Gi
+ requests:
+ cpu: 250m
+ memory: 512Mi
+ env:
+ - name: COMMIT_SHA
+ value: $(params.COMMIT_SHA)
+ image: quay.io/redhat-appstudio/multi-platform-runner:01c7670e81d5120347cf0ad13372742489985e5f@sha256:246adeaaba600e207131d63a7f706cffdcdc37d8f600c56187123ec62823ff44
+ name: build
+ script: |-
+ set -o verbose
+ mkdir -p ~/.ssh
+ if [ -e "/ssh/error" ]; then
+ #no server could be provisioned
+ cat /ssh/error
+ exit 1
+ elif [ -e "/ssh/otp" ]; then
+ curl --cacert /ssh/otp-ca -XPOST -d @/ssh/otp $(cat /ssh/otp-server) >~/.ssh/id_rsa
+ echo "" >> ~/.ssh/id_rsa
+ else
+ cp /ssh/id_rsa ~/.ssh
+ fi
+ chmod 0400 ~/.ssh/id_rsa
+ export SSH_HOST=$(cat /ssh/host)
+ export BUILD_DIR=$(cat /ssh/user-dir)
+ export SSH_ARGS="-o StrictHostKeyChecking=no"
+ mkdir -p scripts
+ echo "$BUILD_DIR"
+ ssh $SSH_ARGS "$SSH_HOST" mkdir -p "$BUILD_DIR/workspaces" "$BUILD_DIR/scripts" "$BUILD_DIR/volumes"
+
+ PORT_FORWARD=""
+ PODMAN_PORT_FORWARD=""
+ if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] ; then
+ PORT_FORWARD=" -L 80:$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR:80"
+ PODMAN_PORT_FORWARD=" -e JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR=localhost"
+ fi
+
+ rsync -ra /shared/ "$SSH_HOST:$BUILD_DIR/volumes/shared/"
+ rsync -ra /var/workdir/ "$SSH_HOST:$BUILD_DIR/volumes/workdir/"
+ rsync -ra /entitlement/ "$SSH_HOST:$BUILD_DIR/volumes/etc-pki-entitlement/"
+ rsync -ra /activation-key/ "$SSH_HOST:$BUILD_DIR/volumes/activation-key/"
+ rsync -ra /additional-secret/ "$SSH_HOST:$BUILD_DIR/volumes/additional-secret/"
+ rsync -ra /mnt/trusted-ca/ "$SSH_HOST:$BUILD_DIR/volumes/trusted-ca/"
+ rsync -ra "$HOME/.docker/" "$SSH_HOST:$BUILD_DIR/.docker/"
+ rsync -ra "/tekton/results/" "$SSH_HOST:$BUILD_DIR/tekton-results/"
+ cat >scripts/script-build.sh <<'REMOTESSHEOF'
+ #!/bin/bash
+ set -o verbose
+ set -e
+ cd /var/workdir
+ ca_bundle=/mnt/trusted-ca/ca-bundle.crt
+ if [ -f "$ca_bundle" ]; then
+ echo "INFO: Using mounted CA bundle: $ca_bundle"
+ cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors
+ update-ca-trust
+ fi
+
+ SOURCE_CODE_DIR=source
+ if [ -e "$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" ]; then
+ dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE"
+ elif [ -e "$SOURCE_CODE_DIR/$DOCKERFILE" ]; then
+ dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$DOCKERFILE"
+ elif echo "$DOCKERFILE" | grep -q "^https\?://"; then
+ echo "Fetch Dockerfile from $DOCKERFILE"
+ dockerfile_path=$(mktemp --suffix=-Dockerfile)
+ http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path" "$DOCKERFILE")
+ if [ $http_code != 200 ]; then
+ echo "No Dockerfile is fetched. Server responds $http_code"
+ exit 1
+ fi
+ http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path.dockerignore.tmp" "$DOCKERFILE.dockerignore")
+ if [ $http_code = 200 ]; then
+ echo "Fetched .dockerignore from $DOCKERFILE.dockerignore"
+ mv "$dockerfile_path.dockerignore.tmp" $SOURCE_CODE_DIR/$CONTEXT/.dockerignore
+ fi
+ else
+ echo "Cannot find Dockerfile $DOCKERFILE"
+ exit 1
+ fi
+ if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] && grep -q '^\s*RUN \(./\)\?mvn' "$dockerfile_path"; then
+ sed -i -e "s|^\s*RUN \(\(./\)\?mvn\)\(.*\)|RUN echo \"mirror.defaulthttp://$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR/v1/cache/default/0/*\" > /tmp/settings.yaml; \1 -s /tmp/settings.yaml \3|g" "$dockerfile_path"
+ touch /var/lib/containers/java
+ fi
+
+ # Fixing group permission on /var/lib/containers
+ chown root:root /var/lib/containers
+
+ sed -i 's/^\s*short-name-mode\s*=\s*.*/short-name-mode = "disabled"/' /etc/containers/registries.conf
+
+ # Setting new namespace to run buildah - 2^32-2
+ echo 'root:1:4294967294' | tee -a /etc/subuid >>/etc/subgid
+
+ BUILDAH_ARGS=()
+
+ BASE_IMAGES=$(grep -i '^\s*FROM' "$dockerfile_path" | sed 's/--platform=\S*//' | awk '{print $2}' | (grep -v ^oci-archive: || true))
+ if [ "${HERMETIC}" == "true" ]; then
+ BUILDAH_ARGS+=("--pull=never")
+ UNSHARE_ARGS="--net"
+ for image in $BASE_IMAGES; do
+ if [ "${image}" != "scratch" ]; then
+ unshare -Ufp --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -- buildah pull $image
+ fi
+ done
+ echo "Build will be executed with network isolation"
+ fi
+
+ if [ -n "${TARGET_STAGE}" ]; then
+ BUILDAH_ARGS+=("--target=${TARGET_STAGE}")
+ fi
+
+ if [ -n "${BUILD_ARGS_FILE}" ]; then
+ BUILDAH_ARGS+=("--build-arg-file=$(pwd)/$SOURCE_CODE_DIR/${BUILD_ARGS_FILE}")
+ fi
+
+ for build_arg in "$@"; do
+ BUILDAH_ARGS+=("--build-arg=$build_arg")
+ done
+
+ if [ -n "${ADD_CAPABILITIES}" ]; then
+ BUILDAH_ARGS+=("--cap-add=${ADD_CAPABILITIES}")
+ fi
+
+ if [ "${SQUASH}" == "true" ]; then
+ BUILDAH_ARGS+=("--squash")
+ fi
+
+ if [ "${SKIP_UNUSED_STAGES}" != "true" ]; then
+ BUILDAH_ARGS+=("--skip-unused-stages=false")
+ fi
+
+ if [ -f "/var/workdir/cachi2/cachi2.env" ]; then
+ cp -r "/var/workdir/cachi2" /tmp/
+ chmod -R go+rwX /tmp/cachi2
+ VOLUME_MOUNTS="--volume /tmp/cachi2:/cachi2"
+ sed -i 's|^\s*run |RUN . /cachi2/cachi2.env \&\& \\\n |i' "$dockerfile_path"
+ echo "Prefetched content will be made available"
+
+ prefetched_repo_for_my_arch="/tmp/cachi2/output/deps/rpm/$(uname -m)/repos.d/cachi2.repo"
+ if [ -f "$prefetched_repo_for_my_arch" ]; then
+ echo "Adding $prefetched_repo_for_my_arch to $YUM_REPOS_D_FETCHED"
+ mkdir -p "$YUM_REPOS_D_FETCHED"
+ cp --no-clobber "$prefetched_repo_for_my_arch" "$YUM_REPOS_D_FETCHED"
+ fi
+ fi
+
+ # if yum repofiles stored in git, copy them to mount point outside the source dir
+ if [ -d "${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}" ]; then
+ mkdir -p ${YUM_REPOS_D_FETCHED}
+ cp -r ${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}/* ${YUM_REPOS_D_FETCHED}
+ fi
+
+ # if anything in the repofiles mount point (either fetched or from git), mount it
+ if [ -d "${YUM_REPOS_D_FETCHED}" ]; then
+ chmod -R go+rwX ${YUM_REPOS_D_FETCHED}
+ mount_point=$(realpath ${YUM_REPOS_D_FETCHED})
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume ${mount_point}:${YUM_REPOS_D_TARGET}"
+ fi
+
+ LABELS=(
+ "--label" "build-date=$(date -u +'%Y-%m-%dT%H:%M:%S')"
+ "--label" "architecture=$(uname -m)"
+ "--label" "vcs-type=git"
+ )
+ [ -n "$COMMIT_SHA" ] && LABELS+=("--label" "vcs-ref=$COMMIT_SHA")
+ [ -n "$IMAGE_EXPIRES_AFTER" ] && LABELS+=("--label" "quay.expires-after=$IMAGE_EXPIRES_AFTER")
+
+ ENTITLEMENT_PATH="/entitlement"
+ if [ -d "$ENTITLEMENT_PATH" ]; then
+ cp -r --preserve=mode "$ENTITLEMENT_PATH" /tmp/entitlement
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/entitlement:/etc/pki/entitlement"
+ echo "Adding the entitlement to the build"
+ fi
+
+ ACTIVATION_KEY_PATH="/activation-key"
+ if [ -d "$ACTIVATION_KEY_PATH" ]; then
+ cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/activation-key:/activation-key"
+ echo "Adding activation key to the build"
+ fi
+
+ ADDITIONAL_SECRET_PATH="/additional-secret"
+ ADDITIONAL_SECRET_TMP="/tmp/additional-secret"
+ if [ -d "$ADDITIONAL_SECRET_PATH" ]; then
+ cp -r --preserve=mode -L "$ADDITIONAL_SECRET_PATH" $ADDITIONAL_SECRET_TMP
+ while read -r filename; do
+ echo "Adding the secret ${ADDITIONAL_SECRET}/${filename} to the build, available at /run/secrets/${ADDITIONAL_SECRET}/${filename}"
+ BUILDAH_ARGS+=("--secret=id=${ADDITIONAL_SECRET}/${filename},src=$ADDITIONAL_SECRET_TMP/${filename}")
+ done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;)
+ fi
+
+ unshare -Uf $UNSHARE_ARGS --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w ${SOURCE_CODE_DIR}/$CONTEXT -- buildah build \
+ $VOLUME_MOUNTS \
+ "${BUILDAH_ARGS[@]}" \
+ "${LABELS[@]}" \
+ --tls-verify=$TLSVERIFY --no-cache \
+ --ulimit nofile=4096:4096 \
+ -f "$dockerfile_path" -t $IMAGE .
+
+ container=$(buildah from --pull-never $IMAGE)
+ buildah mount $container | tee /shared/container_path
+ echo $container >/shared/container_name
+
+ # Save the SBOM produced by Cachi2 so it can be merged into the final SBOM later
+ if [ -f "/tmp/cachi2/output/bom.json" ]; then
+ cp /tmp/cachi2/output/bom.json ./sbom-cachi2.json
+ fi
+
+ for image in $BASE_IMAGES; do
+ if [ "${image}" != "scratch" ]; then
+ buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image" >>/shared/base_images_digests
+ fi
+ done
+
+ # Needed to generate base images SBOM
+ echo "$BASE_IMAGES" >/shared/base_images_from_dockerfile
+
+ buildah push "$IMAGE" oci:rhtap-final-image
+ REMOTESSHEOF
+ chmod +x scripts/script-build.sh
+ rsync -ra scripts "$SSH_HOST:$BUILD_DIR"
+ ssh $SSH_ARGS "$SSH_HOST" $PORT_FORWARD podman run $PODMAN_PORT_FORWARD \
+ --tmpfs /run/secrets \
+ -e ACTIVATION_KEY="$ACTIVATION_KEY" \
+ -e ADDITIONAL_SECRET="$ADDITIONAL_SECRET" \
+ -e ADD_CAPABILITIES="$ADD_CAPABILITIES" \
+ -e BUILDAH_FORMAT="$BUILDAH_FORMAT" \
+ -e BUILD_ARGS_FILE="$BUILD_ARGS_FILE" \
+ -e CONTEXT="$CONTEXT" \
+ -e DOCKERFILE="$DOCKERFILE" \
+ -e ENTITLEMENT_SECRET="$ENTITLEMENT_SECRET" \
+ -e HERMETIC="$HERMETIC" \
+ -e IMAGE="$IMAGE" \
+ -e IMAGE_EXPIRES_AFTER="$IMAGE_EXPIRES_AFTER" \
+ -e SKIP_UNUSED_STAGES="$SKIP_UNUSED_STAGES" \
+ -e SQUASH="$SQUASH" \
+ -e STORAGE_DRIVER="$STORAGE_DRIVER" \
+ -e TARGET_STAGE="$TARGET_STAGE" \
+ -e TLSVERIFY="$TLSVERIFY" \
+ -e YUM_REPOS_D_FETCHED="$YUM_REPOS_D_FETCHED" \
+ -e YUM_REPOS_D_SRC="$YUM_REPOS_D_SRC" \
+ -e YUM_REPOS_D_TARGET="$YUM_REPOS_D_TARGET" \
+ -e COMMIT_SHA="$COMMIT_SHA" \
+ -v "$BUILD_DIR/volumes/shared:/shared:Z" \
+ -v "$BUILD_DIR/volumes/workdir:/var/workdir:Z" \
+ -v "$BUILD_DIR/volumes/etc-pki-entitlement:/entitlement:Z" \
+ -v "$BUILD_DIR/volumes/activation-key:/activation-key:Z" \
+ -v "$BUILD_DIR/volumes/additional-secret:/additional-secret:Z" \
+ -v "$BUILD_DIR/volumes/trusted-ca:/mnt/trusted-ca:Z" \
+ -v "$BUILD_DIR/.docker/:/root/.docker:Z" \
+ -v "$BUILD_DIR/tekton-results/:/tekton/results:Z" \
+ -v $BUILD_DIR/scripts:/script:Z \
+ --user=0 --rm "$BUILDER_IMAGE" /script/script-build.sh
+ rsync -ra "$SSH_HOST:$BUILD_DIR/volumes/shared/" /shared/
+ rsync -ra "$SSH_HOST:$BUILD_DIR/volumes/workdir/" /var/workdir/
+ rsync -ra "$SSH_HOST:$BUILD_DIR/tekton-results/" "/tekton/results/"
+ buildah pull oci:rhtap-final-image
+ buildah images
+ buildah tag localhost/rhtap-final-image "$IMAGE"
+ container=$(buildah from --pull-never "$IMAGE")
+ buildah mount "$container" | tee /shared/container_path
+ echo $container > /shared/container_name
+ securityContext:
+ capabilities:
+ add:
+ - SETFCAP
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /entitlement
+ name: etc-pki-entitlement
+ - mountPath: /activation-key
+ name: activation-key
+ - mountPath: /additional-secret
+ name: additional-secret
+ - mountPath: /mnt/trusted-ca
+ name: trusted-ca
+ readOnly: true
+ - mountPath: /ssh
+ name: ssh
+ readOnly: true
+ workingDir: /var/workdir
+ - computeResources: {}
+ image: quay.io/redhat-appstudio/syft:v0.105.1@sha256:1910b829997650c696881e5fc2fc654ddf3184c27edb1b2024e9cb2ba51ac431
+ name: sbom-syft-generate
+ script: |
+ echo "Running syft on the source directory"
+ syft dir:/var/workdir/source --output cyclonedx-json=/var/workdir/sbom-source.json
+ find $(cat /shared/container_path) -xtype l -delete
+ echo "Running syft on the image filesystem"
+ syft dir:$(cat /shared/container_path) --output cyclonedx-json=/var/workdir/sbom-image.json
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /shared
+ name: shared
+ workingDir: /var/workdir/source
+ - computeResources: {}
+ image: quay.io/redhat-appstudio/hacbs-jvm-build-request-processor:127ee0c223a2b56a9bd20a6f2eaeed3bd6015f77
+ name: analyse-dependencies-java-sbom
+ script: |
+ if [ -f /var/lib/containers/java ]; then
+ /opt/jboss/container/java/run/run-java.sh analyse-dependencies path $(cat /shared/container_path) -s /var/workdir/sbom-image.json --task-run-name $(context.taskRun.name) --publishers $(results.SBOM_JAVA_COMPONENTS_COUNT.path)
+ sed -i 's/^/ /' $(results.SBOM_JAVA_COMPONENTS_COUNT.path) # Workaround for SRVKP-2875
+ else
+ touch $(results.JAVA_COMMUNITY_DEPENDENCIES.path)
+ fi
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /shared
+ name: shared
+ - computeResources: {}
+ image: registry.access.redhat.com/ubi9/python-39:1-172.1712567222@sha256:c96f839e927c52990143df4efb2872946fcd5de9e1ed2014947bb2cf3084c27a
+ name: merge-syft-sboms
+ script: |
+ #!/bin/python3
+ import json
+
+ # load SBOMs
+ with open("./sbom-image.json") as f:
+ image_sbom = json.load(f)
+
+ with open("./sbom-source.json") as f:
+ source_sbom = json.load(f)
+
+ # fetch unique components from available SBOMs
+ def get_identifier(component):
+ return component["name"] + '@' + component.get("version", "")
+
+ image_sbom_components = image_sbom.setdefault("components", [])
+ existing_components = [get_identifier(component) for component in image_sbom_components]
+
+ source_sbom_components = source_sbom.get("components", [])
+ for component in source_sbom_components:
+ if get_identifier(component) not in existing_components:
+ image_sbom_components.append(component)
+ existing_components.append(get_identifier(component))
+
+ image_sbom_components.sort(key=lambda c: get_identifier(c))
+
+ # write the CycloneDX unified SBOM
+ with open("./sbom-cyclonedx.json", "w") as f:
+ json.dump(image_sbom, f, indent=4)
+ securityContext:
+ runAsUser: 0
+ workingDir: /var/workdir
+ - computeResources: {}
+ image: quay.io/redhat-appstudio/cachi2:0.8.0@sha256:5cf15d6f3fb151a3e12c8a17024062b7cc62b0c3e1b165e4a9fa5bf7a77bdc30
+ name: merge-cachi2-sbom
+ script: |
+ if [ -f "sbom-cachi2.json" ]; then
+ echo "Merging contents of sbom-cachi2.json into sbom-cyclonedx.json"
+ /src/utils/merge_syft_sbom.py sbom-cachi2.json sbom-cyclonedx.json >sbom-temp.json
+ mv sbom-temp.json sbom-cyclonedx.json
+ else
+ echo "Skipping step since no Cachi2 SBOM was produced"
+ fi
+ securityContext:
+ runAsUser: 0
+ workingDir: /var/workdir
+ - computeResources: {}
+ image: registry.access.redhat.com/ubi9/python-39:1-172.1712567222@sha256:c96f839e927c52990143df4efb2872946fcd5de9e1ed2014947bb2cf3084c27a
+ name: create-purl-sbom
+ script: |
+ #!/bin/python3
+ import json
+
+ with open("./sbom-cyclonedx.json") as f:
+ cyclonedx_sbom = json.load(f)
+
+ purls = [{"purl": component["purl"]} for component in cyclonedx_sbom.get("components", []) if "purl" in component]
+ purl_content = {"image_contents": {"dependencies": purls}}
+
+ with open("sbom-purl.json", "w") as output_file:
+ json.dump(purl_content, output_file, indent=4)
+ securityContext:
+ runAsUser: 0
+ workingDir: /var/workdir
+ - computeResources: {}
+ image: quay.io/redhat-appstudio/base-images-sbom-script@sha256:667669e3def018f9dbb8eaf8868887a40bc07842221e9a98f6787edcff021840
+ name: create-base-images-sbom
+ script: |
+ python3 /app/base_images_sbom_script.py \
+ --sbom=sbom-cyclonedx.json \
+ --base-images-from-dockerfile=/shared/base_images_from_dockerfile \
+ --base-images-digests=/shared/base_images_digests
+ securityContext:
+ runAsUser: 0
+ workingDir: /var/workdir
+ - computeResources: {}
+ image: quay.io/konflux-ci/buildah:latest@sha256:9ef792d74bcc1d330de6be58b61f2cdbfa1c23b74a291eb2136ffd452d373050
+ name: inject-sbom-and-push
+ script: |
+ base_image_name=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.name"}}' $IMAGE | cut -f1 -d'@')
+ base_image_digest=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.digest"}}' $IMAGE)
+ container=$(buildah from --pull-never $IMAGE)
+ buildah copy $container sbom-cyclonedx.json sbom-purl.json /root/buildinfo/content_manifests/
+ buildah config -a org.opencontainers.image.base.name=${base_image_name} -a org.opencontainers.image.base.digest=${base_image_digest} $container
+
+ BUILDAH_ARGS=()
+ if [ "${SQUASH}" == "true" ]; then
+ BUILDAH_ARGS+=("--squash")
+ fi
+
+ buildah commit "${BUILDAH_ARGS[@]}" $container $IMAGE
+
+ status=-1
+ max_run=5
+ sleep_sec=10
+ for run in $(seq 1 $max_run); do
+ status=0
+ [ "$run" -gt 1 ] && sleep $sleep_sec
+ echo "Pushing sbom image to registry"
+ buildah push \
+ --tls-verify=$TLSVERIFY \
+ --digestfile /var/workdir/image-digest $IMAGE \
+ docker://$IMAGE && break || status=$?
+ done
+ if [ "$status" -ne 0 ]; then
+ echo "Failed to push sbom image to registry after ${max_run} tries"
+ exit 1
+ fi
+
+ cat "/var/workdir"/image-digest | tee $(results.IMAGE_DIGEST.path)
+ echo -n "$IMAGE" | tee $(results.IMAGE_URL.path)
+ securityContext:
+ capabilities:
+ add:
+ - SETFCAP
+ runAsUser: 0
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ workingDir: /var/workdir
+ - args:
+ - attach
+ - sbom
+ - --sbom
+ - sbom-cyclonedx.json
+ - --type
+ - cyclonedx
+ - $(params.IMAGE)
+ computeResources: {}
+ image: quay.io/redhat-appstudio/cosign:v2.1.1@sha256:c883d6f8d39148f2cea71bff4622d196d89df3e510f36c140c097b932f0dd5d5
+ name: upload-sbom
+ workingDir: /var/workdir
+ volumes:
+ - name: activation-key
+ secret:
+ optional: true
+ secretName: $(params.ACTIVATION_KEY)
+ - name: additional-secret
+ secret:
+ optional: true
+ secretName: $(params.ADDITIONAL_SECRET)
+ - name: etc-pki-entitlement
+ secret:
+ optional: true
+ secretName: $(params.ENTITLEMENT_SECRET)
+ - emptyDir: {}
+ name: shared
+ - configMap:
+ items:
+ - key: $(params.caTrustConfigMapKey)
+ path: ca-bundle.crt
+ name: $(params.caTrustConfigMapName)
+ optional: true
+ name: trusted-ca
+ - emptyDir: {}
+ name: varlibcontainers
+ - emptyDir: {}
+ name: workdir
+ - name: ssh
+ secret:
+ optional: false
+ secretName: multi-platform-ssh-$(context.taskRun.name)
diff --git a/task/buildah-remote/0.1/buildah-remote.yaml b/task/buildah-remote/0.1/buildah-remote.yaml
index 0525273b36..bb8a4e5a15 100644
--- a/task/buildah-remote/0.1/buildah-remote.yaml
+++ b/task/buildah-remote/0.1/buildah-remote.yaml
@@ -78,6 +78,10 @@ spec:
description: Name of secret which contains the entitlement certificates
name: ENTITLEMENT_SECRET
type: string
+ - default: activation-key
+ description: Name of secret which contains subscription activation key
+ name: ACTIVATION_KEY
+ type: string
- default: does-not-exist
description: Name of a secret which will be made available to the build with 'buildah
build --secret' at /run/secrets/$ADDITIONAL_SECRET
@@ -166,6 +170,8 @@ spec:
value: $(params.BUILDER_IMAGE)
- name: ENTITLEMENT_SECRET
value: $(params.ENTITLEMENT_SECRET)
+ - name: ACTIVATION_KEY
+ value: $(params.ACTIVATION_KEY)
- name: ADDITIONAL_SECRET
value: $(params.ADDITIONAL_SECRET)
- name: BUILD_ARGS_FILE
@@ -226,6 +232,7 @@ spec:
rsync -ra $(workspaces.source.path)/ "$SSH_HOST:$BUILD_DIR/workspaces/source/"
rsync -ra /shared/ "$SSH_HOST:$BUILD_DIR/volumes/shared/"
rsync -ra /entitlement/ "$SSH_HOST:$BUILD_DIR/volumes/etc-pki-entitlement/"
+ rsync -ra /activation-key/ "$SSH_HOST:$BUILD_DIR/volumes/activation-key/"
rsync -ra /additional-secret/ "$SSH_HOST:$BUILD_DIR/volumes/additional-secret/"
rsync -ra /mnt/trusted-ca/ "$SSH_HOST:$BUILD_DIR/volumes/trusted-ca/"
rsync -ra "$HOME/.docker/" "$SSH_HOST:$BUILD_DIR/.docker/"
@@ -362,6 +369,13 @@ spec:
echo "Adding the entitlement to the build"
fi
+ ACTIVATION_KEY_PATH="/activation-key"
+ if [ -d "$ACTIVATION_KEY_PATH" ]; then
+ cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/activation-key:/activation-key"
+ echo "Adding activation key to the build"
+ fi
+
ADDITIONAL_SECRET_PATH="/additional-secret"
ADDITIONAL_SECRET_TMP="/tmp/additional-secret"
if [ -d "$ADDITIONAL_SECRET_PATH" ]; then
@@ -405,6 +419,7 @@ spec:
chmod +x scripts/script-build.sh
rsync -ra scripts "$SSH_HOST:$BUILD_DIR"
ssh $SSH_ARGS "$SSH_HOST" $PORT_FORWARD podman run $PODMAN_PORT_FORWARD \
+ --tmpfs /run/secrets \
-e BUILDAH_FORMAT="$BUILDAH_FORMAT" \
-e STORAGE_DRIVER="$STORAGE_DRIVER" \
-e HERMETIC="$HERMETIC" \
@@ -419,6 +434,7 @@ spec:
-e TARGET_STAGE="$TARGET_STAGE" \
-e PARAM_BUILDER_IMAGE="$PARAM_BUILDER_IMAGE" \
-e ENTITLEMENT_SECRET="$ENTITLEMENT_SECRET" \
+ -e ACTIVATION_KEY="$ACTIVATION_KEY" \
-e ADDITIONAL_SECRET="$ADDITIONAL_SECRET" \
-e BUILD_ARGS_FILE="$BUILD_ARGS_FILE" \
-e ADD_CAPABILITIES="$ADD_CAPABILITIES" \
@@ -428,6 +444,7 @@ spec:
-v "$BUILD_DIR/workspaces/source:$(workspaces.source.path):Z" \
-v "$BUILD_DIR/volumes/shared:/shared:Z" \
-v "$BUILD_DIR/volumes/etc-pki-entitlement:/entitlement:Z" \
+ -v "$BUILD_DIR/volumes/activation-key:/activation-key:Z" \
-v "$BUILD_DIR/volumes/additional-secret:/additional-secret:Z" \
-v "$BUILD_DIR/volumes/trusted-ca:/mnt/trusted-ca:Z" \
-v "$BUILD_DIR/.docker/:/root/.docker:Z" \
@@ -452,6 +469,8 @@ spec:
name: varlibcontainers
- mountPath: /entitlement
name: etc-pki-entitlement
+ - mountPath: /activation-key
+ name: activation-key
- mountPath: /additional-secret
name: additional-secret
- mountPath: /mnt/trusted-ca
@@ -641,6 +660,10 @@ spec:
secret:
optional: true
secretName: $(params.ENTITLEMENT_SECRET)
+ - name: activation-key
+ secret:
+ optional: true
+ secretName: $(params.ACTIVATION_KEY)
- name: additional-secret
secret:
optional: true
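The activation-key support added above only mounts an optional Secret (named by the `ACTIVATION_KEY` param, `activation-key` by default) into the build at `/activation-key`; creating that Secret is up to the user. A hedged sketch, assuming the subscription data is stored under `org` and `activationkey` entries; the key names are an assumption, not something the task enforces:

```bash
# Hypothetical Secret matching the default value of the ACTIVATION_KEY param.
# Replace the namespace and values with your own; the key names below are only
# a convention for subscription-manager, the task just exposes the files.
kubectl create secret generic activation-key \
  --namespace my-tenant \
  --from-literal=org=1234567 \
  --from-literal=activationkey=my-activation-key
```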
diff --git a/task/buildah-remote/0.2/MIGRATION.md b/task/buildah-remote/0.2/MIGRATION.md
new file mode 100644
index 0000000000..e1f48756aa
--- /dev/null
+++ b/task/buildah-remote/0.2/MIGRATION.md
@@ -0,0 +1,49 @@
+# Migration from 0.1 to 0.2
+
+Version 0.2:
+
+* Removes the `BASE_IMAGES_DIGESTS` result. Please remove all the references to this
+ result from your pipeline.
+ * Base images and their digests can be found in the SBOM for the output image.
+* No longer writes the `base_images_from_dockerfile` file into the `source` workspace.
+* Removes the `BUILDER_IMAGE` and `DOCKER_AUTH` params. Neither one did anything
+ in the later releases of version 0.1. Please stop passing these params to the
+ buildah task if you used to do so with version 0.1.
+
+## Konflux-specific
+
+In a typical Konflux pipeline, the two tasks that used to depend on the `BASE_IMAGES_DIGESTS`
+result are `build-source-image` and `deprecated-base-image-check`.
+
+1. Make sure your version of `deprecated-base-image-check` is at least `0.4`.
+2. Make sure your version of `build-source-image` supports reading base images from
+ the SBOM. Version `0.1` supports it since 2024-07-15. In the logs of your build
+ pipeline, you should see that the build-source-image task now has a GET-BASE-IMAGES
+ step. Once you stop passing the `BASE_IMAGES_DIGESTS` param, this step will emit
+ logs about handling the SBOM.
+3. Remove the parameters that reference the `BASE_IMAGES_DIGESTS` result:
+
+```diff
+@@ -255,10 +255,8 @@ spec:
+ - name: build-source-image
+ params:
+ - name: BINARY_IMAGE
+ value: $(params.output-image)
+- - name: BASE_IMAGES
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ runAfter:
+ - build-container
+ taskRef:
+ params:
+@@ -282,10 +280,8 @@ spec:
+ - name: workspace
+ workspace: workspace
+ - name: deprecated-base-image-check
+ params:
+- - name: BASE_IMAGES_DIGESTS
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ - name: IMAGE_URL
+ value: $(tasks.build-container.results.IMAGE_URL)
+ - name: IMAGE_DIGEST
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
+```
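Before moving a pipeline to the 0.2 task, it is worth confirming that nothing still consumes the removed result and that the pinned check task is new enough. A small sketch, assuming pipeline definitions live under `.tekton/` (the path is illustrative):

```bash
# Any hit means the pipeline still references the removed result and needs
# the edits shown in the diff above.
grep -rn 'BASE_IMAGES_DIGESTS' .tekton/

# Eyeball the pinned task references; deprecated-image-check must be at least 0.4.
grep -rn 'deprecated-image-check' .tekton/
```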
diff --git a/task/buildah-remote/0.2/buildah-remote.yaml b/task/buildah-remote/0.2/buildah-remote.yaml
new file mode 100644
index 0000000000..931ba472e1
--- /dev/null
+++ b/task/buildah-remote/0.2/buildah-remote.yaml
@@ -0,0 +1,661 @@
+apiVersion: tekton.dev/v1
+kind: Task
+metadata:
+ annotations:
+ tekton.dev/pipelines.minVersion: 0.12.1
+ tekton.dev/tags: image-build, konflux
+ creationTimestamp: null
+ labels:
+ app.kubernetes.io/version: "0.2"
+ build.appstudio.redhat.com/build_type: docker
+ name: buildah-remote
+spec:
+  description: |-
+    The buildah task builds source code into a container image and pushes the image to a container registry using the buildah tool.
+    In addition, it generates an SBOM file, injects the SBOM file into the final container image, and pushes the SBOM file as a separate image using the cosign tool.
+    When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled, it triggers rebuilds of Java artifacts.
+    When the prefetch-dependencies task is activated, its artifacts are used to run the build in a hermetic environment.
+ params:
+ - description: Reference of the image buildah will produce.
+ name: IMAGE
+ type: string
+ - default: ./Dockerfile
+ description: Path to the Dockerfile to build.
+ name: DOCKERFILE
+ type: string
+ - default: .
+ description: Path to the directory to use as context.
+ name: CONTEXT
+ type: string
+ - default: "true"
+ description: Verify the TLS on the registry endpoint (for push/pull to a non-TLS
+ registry)
+ name: TLSVERIFY
+ type: string
+ - default: "false"
+ description: Determines if build will be executed without network access.
+ name: HERMETIC
+ type: string
+ - default: ""
+ description: In case it is not empty, the prefetched content should be made available
+ to the build.
+ name: PREFETCH_INPUT
+ type: string
+ - default: ""
+ description: Delete image tag after specified time. Empty means to keep the image
+ tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks,
+ respectively.
+ name: IMAGE_EXPIRES_AFTER
+ type: string
+ - default: ""
+ description: The image is built from this commit.
+ name: COMMIT_SHA
+ type: string
+ - default: repos.d
+ description: Path in the git repository in which yum repository files are stored
+ name: YUM_REPOS_D_SRC
+ - default: fetched.repos.d
+ description: Path in source workspace where dynamically-fetched repos are present
+ name: YUM_REPOS_D_FETCHED
+ - default: /etc/yum.repos.d
+ description: Target path on the container in which yum repository files should
+ be made available
+ name: YUM_REPOS_D_TARGET
+ - default: ""
+ description: Target stage in Dockerfile to build. If not specified, the Dockerfile
+ is processed entirely to (and including) its last stage.
+ name: TARGET_STAGE
+ type: string
+ - default: etc-pki-entitlement
+ description: Name of secret which contains the entitlement certificates
+ name: ENTITLEMENT_SECRET
+ type: string
+ - default: activation-key
+ description: Name of secret which contains subscription activation key
+ name: ACTIVATION_KEY
+ type: string
+ - default: does-not-exist
+ description: Name of a secret which will be made available to the build with 'buildah
+ build --secret' at /run/secrets/$ADDITIONAL_SECRET
+ name: ADDITIONAL_SECRET
+ type: string
+ - default: []
+ description: Array of --build-arg values ("arg=value" strings)
+ name: BUILD_ARGS
+ type: array
+ - default: ""
+ description: Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file
+ name: BUILD_ARGS_FILE
+ type: string
+ - default: trusted-ca
+ description: The name of the ConfigMap to read CA bundle data from.
+ name: caTrustConfigMapName
+ type: string
+ - default: ca-bundle.crt
+ description: The name of the key in the ConfigMap that contains the CA bundle
+ data.
+ name: caTrustConfigMapKey
+ type: string
+ - default: ""
+ description: Comma separated list of extra capabilities to add when running 'buildah
+ build'
+ name: ADD_CAPABILITIES
+ type: string
+ - default: "false"
+ description: Squash all new and previous layers added as a part of this build,
+ as per --squash
+ name: SQUASH
+ type: string
+ - default: vfs
+ description: Storage driver to configure for buildah
+ name: STORAGE_DRIVER
+ type: string
+ - default: "true"
+ description: Whether to skip stages in Containerfile that seem unused by subsequent
+ stages
+ name: SKIP_UNUSED_STAGES
+ type: string
+ - description: The platform to build on
+ name: PLATFORM
+ type: string
+ results:
+ - description: Digest of the image just built
+ name: IMAGE_DIGEST
+ - description: Image repository where the built image was pushed
+ name: IMAGE_URL
+ - description: The counting of Java components by publisher in JSON format
+ name: SBOM_JAVA_COMPONENTS_COUNT
+ type: string
+ - description: The Java dependencies that came from community sources such as Maven
+ central.
+ name: JAVA_COMMUNITY_DEPENDENCIES
+ stepTemplate:
+ computeResources: {}
+ env:
+ - name: BUILDAH_FORMAT
+ value: oci
+ - name: STORAGE_DRIVER
+ value: $(params.STORAGE_DRIVER)
+ - name: HERMETIC
+ value: $(params.HERMETIC)
+ - name: CONTEXT
+ value: $(params.CONTEXT)
+ - name: DOCKERFILE
+ value: $(params.DOCKERFILE)
+ - name: IMAGE
+ value: $(params.IMAGE)
+ - name: TLSVERIFY
+ value: $(params.TLSVERIFY)
+ - name: IMAGE_EXPIRES_AFTER
+ value: $(params.IMAGE_EXPIRES_AFTER)
+ - name: YUM_REPOS_D_SRC
+ value: $(params.YUM_REPOS_D_SRC)
+ - name: YUM_REPOS_D_FETCHED
+ value: $(params.YUM_REPOS_D_FETCHED)
+ - name: YUM_REPOS_D_TARGET
+ value: $(params.YUM_REPOS_D_TARGET)
+ - name: TARGET_STAGE
+ value: $(params.TARGET_STAGE)
+ - name: ENTITLEMENT_SECRET
+ value: $(params.ENTITLEMENT_SECRET)
+ - name: ACTIVATION_KEY
+ value: $(params.ACTIVATION_KEY)
+ - name: ADDITIONAL_SECRET
+ value: $(params.ADDITIONAL_SECRET)
+ - name: BUILD_ARGS_FILE
+ value: $(params.BUILD_ARGS_FILE)
+ - name: ADD_CAPABILITIES
+ value: $(params.ADD_CAPABILITIES)
+ - name: SQUASH
+ value: $(params.SQUASH)
+ - name: SKIP_UNUSED_STAGES
+ value: $(params.SKIP_UNUSED_STAGES)
+ - name: BUILDER_IMAGE
+ value: quay.io/konflux-ci/buildah:latest@sha256:9ef792d74bcc1d330de6be58b61f2cdbfa1c23b74a291eb2136ffd452d373050
+ volumeMounts:
+ - mountPath: /shared
+ name: shared
+ steps:
+ - args:
+ - $(params.BUILD_ARGS[*])
+ computeResources:
+ limits:
+ memory: 4Gi
+ requests:
+ cpu: 250m
+ memory: 512Mi
+ env:
+ - name: COMMIT_SHA
+ value: $(params.COMMIT_SHA)
+ image: quay.io/redhat-appstudio/multi-platform-runner:01c7670e81d5120347cf0ad13372742489985e5f@sha256:246adeaaba600e207131d63a7f706cffdcdc37d8f600c56187123ec62823ff44
+ name: build
+ script: |-
+ set -o verbose
+ mkdir -p ~/.ssh
+ if [ -e "/ssh/error" ]; then
+ #no server could be provisioned
+ cat /ssh/error
+ exit 1
+ elif [ -e "/ssh/otp" ]; then
+ curl --cacert /ssh/otp-ca -XPOST -d @/ssh/otp $(cat /ssh/otp-server) >~/.ssh/id_rsa
+ echo "" >> ~/.ssh/id_rsa
+ else
+ cp /ssh/id_rsa ~/.ssh
+ fi
+ chmod 0400 ~/.ssh/id_rsa
+ export SSH_HOST=$(cat /ssh/host)
+ export BUILD_DIR=$(cat /ssh/user-dir)
+ export SSH_ARGS="-o StrictHostKeyChecking=no"
+ mkdir -p scripts
+ echo "$BUILD_DIR"
+ ssh $SSH_ARGS "$SSH_HOST" mkdir -p "$BUILD_DIR/workspaces" "$BUILD_DIR/scripts" "$BUILD_DIR/volumes"
+
+ PORT_FORWARD=""
+ PODMAN_PORT_FORWARD=""
+ if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] ; then
+ PORT_FORWARD=" -L 80:$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR:80"
+ PODMAN_PORT_FORWARD=" -e JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR=localhost"
+ fi
+
+ rsync -ra $(workspaces.source.path)/ "$SSH_HOST:$BUILD_DIR/workspaces/source/"
+ rsync -ra /shared/ "$SSH_HOST:$BUILD_DIR/volumes/shared/"
+ rsync -ra /entitlement/ "$SSH_HOST:$BUILD_DIR/volumes/etc-pki-entitlement/"
+ rsync -ra /activation-key/ "$SSH_HOST:$BUILD_DIR/volumes/activation-key/"
+ rsync -ra /additional-secret/ "$SSH_HOST:$BUILD_DIR/volumes/additional-secret/"
+ rsync -ra /mnt/trusted-ca/ "$SSH_HOST:$BUILD_DIR/volumes/trusted-ca/"
+ rsync -ra "$HOME/.docker/" "$SSH_HOST:$BUILD_DIR/.docker/"
+ rsync -ra "/tekton/results/" "$SSH_HOST:$BUILD_DIR/tekton-results/"
+ cat >scripts/script-build.sh <<'REMOTESSHEOF'
+ #!/bin/bash
+ set -o verbose
+ set -e
+ cd $(workspaces.source.path)
+ ca_bundle=/mnt/trusted-ca/ca-bundle.crt
+ if [ -f "$ca_bundle" ]; then
+ echo "INFO: Using mounted CA bundle: $ca_bundle"
+ cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors
+ update-ca-trust
+ fi
+
+ SOURCE_CODE_DIR=source
+ if [ -e "$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" ]; then
+ dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE"
+ elif [ -e "$SOURCE_CODE_DIR/$DOCKERFILE" ]; then
+ dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$DOCKERFILE"
+ elif echo "$DOCKERFILE" | grep -q "^https\?://"; then
+ echo "Fetch Dockerfile from $DOCKERFILE"
+ dockerfile_path=$(mktemp --suffix=-Dockerfile)
+ http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path" "$DOCKERFILE")
+ if [ $http_code != 200 ]; then
+ echo "No Dockerfile is fetched. Server responds $http_code"
+ exit 1
+ fi
+ http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path.dockerignore.tmp" "$DOCKERFILE.dockerignore")
+ if [ $http_code = 200 ]; then
+ echo "Fetched .dockerignore from $DOCKERFILE.dockerignore"
+ mv "$dockerfile_path.dockerignore.tmp" $SOURCE_CODE_DIR/$CONTEXT/.dockerignore
+ fi
+ else
+ echo "Cannot find Dockerfile $DOCKERFILE"
+ exit 1
+ fi
+ if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] && grep -q '^\s*RUN \(./\)\?mvn' "$dockerfile_path"; then
+ sed -i -e "s|^\s*RUN \(\(./\)\?mvn\)\(.*\)|RUN echo \"mirror.defaulthttp://$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR/v1/cache/default/0/*\" > /tmp/settings.yaml; \1 -s /tmp/settings.yaml \3|g" "$dockerfile_path"
+ touch /var/lib/containers/java
+ fi
+
+ # Fixing group permission on /var/lib/containers
+ chown root:root /var/lib/containers
+
+ sed -i 's/^\s*short-name-mode\s*=\s*.*/short-name-mode = "disabled"/' /etc/containers/registries.conf
+
+ # Setting new namespace to run buildah - 2^32-2
+ echo 'root:1:4294967294' | tee -a /etc/subuid >> /etc/subgid
+
+ BUILDAH_ARGS=()
+
+ BASE_IMAGES=$(grep -i '^\s*FROM' "$dockerfile_path" | sed 's/--platform=\S*//' | awk '{print $2}' | (grep -v ^oci-archive: || true))
+ if [ "${HERMETIC}" == "true" ]; then
+ BUILDAH_ARGS+=("--pull=never")
+ UNSHARE_ARGS="--net"
+ for image in $BASE_IMAGES; do
+ if [ "${image}" != "scratch" ]; then
+ unshare -Ufp --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -- buildah pull $image
+ fi
+ done
+ echo "Build will be executed with network isolation"
+ fi
+
+ if [ -n "${TARGET_STAGE}" ]; then
+ BUILDAH_ARGS+=("--target=${TARGET_STAGE}")
+ fi
+
+ if [ -n "${BUILD_ARGS_FILE}" ]; then
+ BUILDAH_ARGS+=("--build-arg-file=$(pwd)/$SOURCE_CODE_DIR/${BUILD_ARGS_FILE}")
+ fi
+
+ for build_arg in "$@"; do
+ BUILDAH_ARGS+=("--build-arg=$build_arg")
+ done
+
+ if [ -n "${ADD_CAPABILITIES}" ]; then
+ BUILDAH_ARGS+=("--cap-add=${ADD_CAPABILITIES}")
+ fi
+
+ if [ "${SQUASH}" == "true" ]; then
+ BUILDAH_ARGS+=("--squash")
+ fi
+
+ if [ "${SKIP_UNUSED_STAGES}" != "true" ] ; then
+ BUILDAH_ARGS+=("--skip-unused-stages=false")
+ fi
+
+ if [ -f "$(workspaces.source.path)/cachi2/cachi2.env" ]; then
+ cp -r "$(workspaces.source.path)/cachi2" /tmp/
+ chmod -R go+rwX /tmp/cachi2
+ VOLUME_MOUNTS="--volume /tmp/cachi2:/cachi2"
+ sed -i 's|^\s*run |RUN . /cachi2/cachi2.env \&\& \\\n |i' "$dockerfile_path"
+ echo "Prefetched content will be made available"
+
+ prefetched_repo_for_my_arch="/tmp/cachi2/output/deps/rpm/$(uname -m)/repos.d/cachi2.repo"
+ if [ -f "$prefetched_repo_for_my_arch" ]; then
+ echo "Adding $prefetched_repo_for_my_arch to $YUM_REPOS_D_FETCHED"
+ mkdir -p "$YUM_REPOS_D_FETCHED"
+ cp --no-clobber "$prefetched_repo_for_my_arch" "$YUM_REPOS_D_FETCHED"
+ fi
+ fi
+
+ # if yum repofiles stored in git, copy them to mount point outside the source dir
+ if [ -d "${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}" ]; then
+ mkdir -p ${YUM_REPOS_D_FETCHED}
+ cp -r ${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}/* ${YUM_REPOS_D_FETCHED}
+ fi
+
+ # if anything in the repofiles mount point (either fetched or from git), mount it
+ if [ -d "${YUM_REPOS_D_FETCHED}" ]; then
+ chmod -R go+rwX ${YUM_REPOS_D_FETCHED}
+ mount_point=$(realpath ${YUM_REPOS_D_FETCHED})
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume ${mount_point}:${YUM_REPOS_D_TARGET}"
+ fi
+
+ LABELS=(
+ "--label" "build-date=$(date -u +'%Y-%m-%dT%H:%M:%S')"
+ "--label" "architecture=$(uname -m)"
+ "--label" "vcs-type=git"
+ )
+ [ -n "$COMMIT_SHA" ] && LABELS+=("--label" "vcs-ref=$COMMIT_SHA")
+ [ -n "$IMAGE_EXPIRES_AFTER" ] && LABELS+=("--label" "quay.expires-after=$IMAGE_EXPIRES_AFTER")
+
+ ENTITLEMENT_PATH="/entitlement"
+ if [ -d "$ENTITLEMENT_PATH" ]; then
+ cp -r --preserve=mode "$ENTITLEMENT_PATH" /tmp/entitlement
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/entitlement:/etc/pki/entitlement"
+ echo "Adding the entitlement to the build"
+ fi
+
+ ACTIVATION_KEY_PATH="/activation-key"
+ if [ -d "$ACTIVATION_KEY_PATH" ]; then
+ cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/activation-key:/activation-key"
+ echo "Adding activation key to the build"
+ fi
+
+ ADDITIONAL_SECRET_PATH="/additional-secret"
+ ADDITIONAL_SECRET_TMP="/tmp/additional-secret"
+ if [ -d "$ADDITIONAL_SECRET_PATH" ]; then
+ cp -r --preserve=mode -L "$ADDITIONAL_SECRET_PATH" $ADDITIONAL_SECRET_TMP
+ while read -r filename; do
+ echo "Adding the secret ${ADDITIONAL_SECRET}/${filename} to the build, available at /run/secrets/${ADDITIONAL_SECRET}/${filename}"
+ BUILDAH_ARGS+=("--secret=id=${ADDITIONAL_SECRET}/${filename},src=$ADDITIONAL_SECRET_TMP/${filename}")
+ done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;)
+ fi
+
+ unshare -Uf $UNSHARE_ARGS --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w ${SOURCE_CODE_DIR}/$CONTEXT -- buildah build \
+ $VOLUME_MOUNTS \
+ "${BUILDAH_ARGS[@]}" \
+ "${LABELS[@]}" \
+ --tls-verify=$TLSVERIFY --no-cache \
+ --ulimit nofile=4096:4096 \
+ -f "$dockerfile_path" -t $IMAGE .
+
+ container=$(buildah from --pull-never $IMAGE)
+ buildah mount $container | tee /shared/container_path
+ echo $container > /shared/container_name
+
+ # Save the SBOM produced by Cachi2 so it can be merged into the final SBOM later
+ if [ -f "/tmp/cachi2/output/bom.json" ]; then
+ cp /tmp/cachi2/output/bom.json ./sbom-cachi2.json
+ fi
+
+ for image in $BASE_IMAGES; do
+ if [ "${image}" != "scratch" ]; then
+ buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image" >> /shared/base_images_digests
+ fi
+ done
+
+ # Needed to generate base images SBOM
+ echo "$BASE_IMAGES" > /shared/base_images_from_dockerfile
+
+ buildah push "$IMAGE" oci:rhtap-final-image
+ REMOTESSHEOF
+ chmod +x scripts/script-build.sh
+ rsync -ra scripts "$SSH_HOST:$BUILD_DIR"
+ ssh $SSH_ARGS "$SSH_HOST" $PORT_FORWARD podman run $PODMAN_PORT_FORWARD \
+ --tmpfs /run/secrets \
+ -e BUILDAH_FORMAT="$BUILDAH_FORMAT" \
+ -e STORAGE_DRIVER="$STORAGE_DRIVER" \
+ -e HERMETIC="$HERMETIC" \
+ -e CONTEXT="$CONTEXT" \
+ -e DOCKERFILE="$DOCKERFILE" \
+ -e IMAGE="$IMAGE" \
+ -e TLSVERIFY="$TLSVERIFY" \
+ -e IMAGE_EXPIRES_AFTER="$IMAGE_EXPIRES_AFTER" \
+ -e YUM_REPOS_D_SRC="$YUM_REPOS_D_SRC" \
+ -e YUM_REPOS_D_FETCHED="$YUM_REPOS_D_FETCHED" \
+ -e YUM_REPOS_D_TARGET="$YUM_REPOS_D_TARGET" \
+ -e TARGET_STAGE="$TARGET_STAGE" \
+ -e ENTITLEMENT_SECRET="$ENTITLEMENT_SECRET" \
+ -e ACTIVATION_KEY="$ACTIVATION_KEY" \
+ -e ADDITIONAL_SECRET="$ADDITIONAL_SECRET" \
+ -e BUILD_ARGS_FILE="$BUILD_ARGS_FILE" \
+ -e ADD_CAPABILITIES="$ADD_CAPABILITIES" \
+ -e SQUASH="$SQUASH" \
+ -e SKIP_UNUSED_STAGES="$SKIP_UNUSED_STAGES" \
+ -e COMMIT_SHA="$COMMIT_SHA" \
+ -v "$BUILD_DIR/workspaces/source:$(workspaces.source.path):Z" \
+ -v "$BUILD_DIR/volumes/shared:/shared:Z" \
+ -v "$BUILD_DIR/volumes/etc-pki-entitlement:/entitlement:Z" \
+ -v "$BUILD_DIR/volumes/activation-key:/activation-key:Z" \
+ -v "$BUILD_DIR/volumes/additional-secret:/additional-secret:Z" \
+ -v "$BUILD_DIR/volumes/trusted-ca:/mnt/trusted-ca:Z" \
+ -v "$BUILD_DIR/.docker/:/root/.docker:Z" \
+ -v "$BUILD_DIR/tekton-results/:/tekton/results:Z" \
+ -v $BUILD_DIR/scripts:/script:Z \
+ --user=0 --rm "$BUILDER_IMAGE" /script/script-build.sh
+ rsync -ra "$SSH_HOST:$BUILD_DIR/workspaces/source/" "$(workspaces.source.path)/"
+ rsync -ra "$SSH_HOST:$BUILD_DIR/volumes/shared/" /shared/
+ rsync -ra "$SSH_HOST:$BUILD_DIR/tekton-results/" "/tekton/results/"
+ buildah pull oci:rhtap-final-image
+ buildah images
+ buildah tag localhost/rhtap-final-image "$IMAGE"
+ container=$(buildah from --pull-never "$IMAGE")
+ buildah mount "$container" | tee /shared/container_path
+ echo $container > /shared/container_name
+ securityContext:
+ capabilities:
+ add:
+ - SETFCAP
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /entitlement
+ name: etc-pki-entitlement
+ - mountPath: /activation-key
+ name: activation-key
+ - mountPath: /additional-secret
+ name: additional-secret
+ - mountPath: /mnt/trusted-ca
+ name: trusted-ca
+ readOnly: true
+ - mountPath: /ssh
+ name: ssh
+ readOnly: true
+ workingDir: $(workspaces.source.path)
+ - computeResources: {}
+ image: quay.io/redhat-appstudio/syft:v0.105.1@sha256:1910b829997650c696881e5fc2fc654ddf3184c27edb1b2024e9cb2ba51ac431
+ name: sbom-syft-generate
+ script: |
+ echo "Running syft on the source directory"
+ syft dir:$(workspaces.source.path)/source --output cyclonedx-json=$(workspaces.source.path)/sbom-source.json
+ find $(cat /shared/container_path) -xtype l -delete
+ echo "Running syft on the image filesystem"
+ syft dir:$(cat /shared/container_path) --output cyclonedx-json=$(workspaces.source.path)/sbom-image.json
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /shared
+ name: shared
+ workingDir: $(workspaces.source.path)/source
+ - computeResources: {}
+ image: quay.io/redhat-appstudio/hacbs-jvm-build-request-processor:127ee0c223a2b56a9bd20a6f2eaeed3bd6015f77
+ name: analyse-dependencies-java-sbom
+ script: |
+ if [ -f /var/lib/containers/java ]; then
+ /opt/jboss/container/java/run/run-java.sh analyse-dependencies path $(cat /shared/container_path) -s $(workspaces.source.path)/sbom-image.json --task-run-name $(context.taskRun.name) --publishers $(results.SBOM_JAVA_COMPONENTS_COUNT.path)
+ sed -i 's/^/ /' $(results.SBOM_JAVA_COMPONENTS_COUNT.path) # Workaround for SRVKP-2875
+ else
+ touch $(results.JAVA_COMMUNITY_DEPENDENCIES.path)
+ fi
+ securityContext:
+ runAsUser: 0
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /shared
+ name: shared
+ - computeResources: {}
+ image: registry.access.redhat.com/ubi9/python-39:1-172.1712567222@sha256:c96f839e927c52990143df4efb2872946fcd5de9e1ed2014947bb2cf3084c27a
+ name: merge-syft-sboms
+ script: |
+ #!/bin/python3
+ import json
+
+ # load SBOMs
+ with open("./sbom-image.json") as f:
+ image_sbom = json.load(f)
+
+ with open("./sbom-source.json") as f:
+ source_sbom = json.load(f)
+
+ # fetch unique components from available SBOMs
+ def get_identifier(component):
+ return component["name"] + '@' + component.get("version", "")
+
+ image_sbom_components = image_sbom.setdefault("components", [])
+ existing_components = [get_identifier(component) for component in image_sbom_components]
+
+ source_sbom_components = source_sbom.get("components", [])
+ for component in source_sbom_components:
+ if get_identifier(component) not in existing_components:
+ image_sbom_components.append(component)
+ existing_components.append(get_identifier(component))
+
+ image_sbom_components.sort(key=lambda c: get_identifier(c))
+
+ # write the CycloneDX unified SBOM
+ with open("./sbom-cyclonedx.json", "w") as f:
+ json.dump(image_sbom, f, indent=4)
+ securityContext:
+ runAsUser: 0
+ workingDir: $(workspaces.source.path)
+ - computeResources: {}
+ image: quay.io/redhat-appstudio/cachi2:0.8.0@sha256:5cf15d6f3fb151a3e12c8a17024062b7cc62b0c3e1b165e4a9fa5bf7a77bdc30
+ name: merge-cachi2-sbom
+ script: |
+ if [ -f "sbom-cachi2.json" ]; then
+ echo "Merging contents of sbom-cachi2.json into sbom-cyclonedx.json"
+ /src/utils/merge_syft_sbom.py sbom-cachi2.json sbom-cyclonedx.json > sbom-temp.json
+ mv sbom-temp.json sbom-cyclonedx.json
+ else
+ echo "Skipping step since no Cachi2 SBOM was produced"
+ fi
+ securityContext:
+ runAsUser: 0
+ workingDir: $(workspaces.source.path)
+ - computeResources: {}
+ image: registry.access.redhat.com/ubi9/python-39:1-172.1712567222@sha256:c96f839e927c52990143df4efb2872946fcd5de9e1ed2014947bb2cf3084c27a
+ name: create-purl-sbom
+ script: |
+ #!/bin/python3
+ import json
+
+ with open("./sbom-cyclonedx.json") as f:
+ cyclonedx_sbom = json.load(f)
+
+ purls = [{"purl": component["purl"]} for component in cyclonedx_sbom.get("components", []) if "purl" in component]
+ purl_content = {"image_contents": {"dependencies": purls}}
+
+ with open("sbom-purl.json", "w") as output_file:
+ json.dump(purl_content, output_file, indent=4)
+ securityContext:
+ runAsUser: 0
+ workingDir: $(workspaces.source.path)
+ - computeResources: {}
+ image: quay.io/redhat-appstudio/base-images-sbom-script@sha256:667669e3def018f9dbb8eaf8868887a40bc07842221e9a98f6787edcff021840
+ name: create-base-images-sbom
+ script: |
+ python3 /app/base_images_sbom_script.py \
+ --sbom=sbom-cyclonedx.json \
+ --base-images-from-dockerfile=/shared/base_images_from_dockerfile \
+ --base-images-digests=/shared/base_images_digests
+ securityContext:
+ runAsUser: 0
+ workingDir: $(workspaces.source.path)
+ - computeResources: {}
+ image: quay.io/konflux-ci/buildah:latest@sha256:9ef792d74bcc1d330de6be58b61f2cdbfa1c23b74a291eb2136ffd452d373050
+ name: inject-sbom-and-push
+ script: |
+ base_image_name=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.name"}}' $IMAGE | cut -f1 -d'@')
+ base_image_digest=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.digest"}}' $IMAGE)
+ container=$(buildah from --pull-never $IMAGE)
+ buildah copy $container sbom-cyclonedx.json sbom-purl.json /root/buildinfo/content_manifests/
+ buildah config -a org.opencontainers.image.base.name=${base_image_name} -a org.opencontainers.image.base.digest=${base_image_digest} $container
+
+ BUILDAH_ARGS=()
+ if [ "${SQUASH}" == "true" ]; then
+ BUILDAH_ARGS+=("--squash")
+ fi
+
+ buildah commit "${BUILDAH_ARGS[@]}" $container $IMAGE
+
+ status=-1
+ max_run=5
+ sleep_sec=10
+ for run in $(seq 1 $max_run); do
+ status=0
+ [ "$run" -gt 1 ] && sleep $sleep_sec
+ echo "Pushing sbom image to registry"
+ buildah push \
+ --tls-verify=$TLSVERIFY \
+ --digestfile $(workspaces.source.path)/image-digest $IMAGE \
+ docker://$IMAGE && break || status=$?
+ done
+ if [ "$status" -ne 0 ]; then
+ echo "Failed to push sbom image to registry after ${max_run} tries"
+ exit 1
+ fi
+
+ cat "$(workspaces.source.path)"/image-digest | tee $(results.IMAGE_DIGEST.path)
+ echo -n "$IMAGE" | tee $(results.IMAGE_URL.path)
+ securityContext:
+ capabilities:
+ add:
+ - SETFCAP
+ runAsUser: 0
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ workingDir: $(workspaces.source.path)
+ - args:
+ - attach
+ - sbom
+ - --sbom
+ - sbom-cyclonedx.json
+ - --type
+ - cyclonedx
+ - $(params.IMAGE)
+ computeResources: {}
+ image: quay.io/redhat-appstudio/cosign:v2.1.1@sha256:c883d6f8d39148f2cea71bff4622d196d89df3e510f36c140c097b932f0dd5d5
+ name: upload-sbom
+ workingDir: $(workspaces.source.path)
+ volumes:
+ - emptyDir: {}
+ name: varlibcontainers
+ - emptyDir: {}
+ name: shared
+ - name: etc-pki-entitlement
+ secret:
+ optional: true
+ secretName: $(params.ENTITLEMENT_SECRET)
+ - name: activation-key
+ secret:
+ optional: true
+ secretName: $(params.ACTIVATION_KEY)
+ - name: additional-secret
+ secret:
+ optional: true
+ secretName: $(params.ADDITIONAL_SECRET)
+ - configMap:
+ items:
+ - key: $(params.caTrustConfigMapKey)
+ path: ca-bundle.crt
+ name: $(params.caTrustConfigMapName)
+ optional: true
+ name: trusted-ca
+ - name: ssh
+ secret:
+ optional: false
+ secretName: multi-platform-ssh-$(context.taskRun.name)
+ workspaces:
+ - description: Workspace containing the source code to build.
+ name: source
diff --git a/task/buildah/0.1/README.md b/task/buildah/0.1/README.md
index def5b2f122..7e773783b0 100644
--- a/task/buildah/0.1/README.md
+++ b/task/buildah/0.1/README.md
@@ -23,10 +23,16 @@ When prefetch-dependencies task was activated it is using its artifacts to run b
|YUM_REPOS_D_TARGET|Target path on the container in which yum repository files should be made available|/etc/yum.repos.d|false|
|TARGET_STAGE|Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage.|""|false|
|ENTITLEMENT_SECRET|Name of secret which contains the entitlement certificates|etc-pki-entitlement|false|
+|ACTIVATION_KEY|Name of secret which contains subscription activation key|activation-key|false|
|ADDITIONAL_SECRET|Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET|does-not-exist|false|
|BUILD_ARGS|Array of --build-arg values ("arg=value" strings)|[]|false|
|BUILD_ARGS_FILE|Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file|""|false|
+|caTrustConfigMapName|The name of the ConfigMap to read CA bundle data from.|trusted-ca|false|
+|caTrustConfigMapKey|The name of the key in the ConfigMap that contains the CA bundle data.|ca-bundle.crt|false|
+|ADD_CAPABILITIES|Comma separated list of extra capabilities to add when running 'buildah build'|""|false|
|SQUASH|Squash all new and previous layers added as a part of this build, as per --squash|false|false|
+|STORAGE_DRIVER|Storage driver to configure for buildah|vfs|false|
+|SKIP_UNUSED_STAGES|Whether to skip stages in Containerfile that seem unused by subsequent stages|true|false|
## Results
|name|description|
diff --git a/task/buildah/0.1/buildah.yaml b/task/buildah/0.1/buildah.yaml
index 04e493dd3f..dec1ad4b44 100644
--- a/task/buildah/0.1/buildah.yaml
+++ b/task/buildah/0.1/buildah.yaml
@@ -71,6 +71,10 @@ spec:
description: Name of secret which contains the entitlement certificates
type: string
default: "etc-pki-entitlement"
+ - name: ACTIVATION_KEY
+ default: activation-key
+ description: Name of secret which contains subscription activation key
+ type: string
- name: ADDITIONAL_SECRET
description: Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET
type: string
@@ -153,6 +157,8 @@ spec:
value: $(params.BUILDER_IMAGE)
- name: ENTITLEMENT_SECRET
value: $(params.ENTITLEMENT_SECRET)
+ - name: ACTIVATION_KEY
+ value: $(params.ACTIVATION_KEY)
- name: ADDITIONAL_SECRET
value: $(params.ADDITIONAL_SECRET)
- name: BUILD_ARGS_FILE
@@ -306,6 +312,13 @@ spec:
echo "Adding the entitlement to the build"
fi
+ ACTIVATION_KEY_PATH="/activation-key"
+ if [ -d "$ACTIVATION_KEY_PATH" ]; then
+ cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/activation-key:/activation-key"
+ echo "Adding activation key to the build"
+ fi
+
ADDITIONAL_SECRET_PATH="/additional-secret"
ADDITIONAL_SECRET_TMP="/tmp/additional-secret"
if [ -d "$ADDITIONAL_SECRET_PATH" ]; then
@@ -353,6 +366,8 @@ spec:
name: varlibcontainers
- mountPath: "/entitlement"
name: etc-pki-entitlement
+ - mountPath: /activation-key
+ name: activation-key
- mountPath: "/additional-secret"
name: additional-secret
- name: trusted-ca
@@ -543,6 +558,10 @@ spec:
secret:
secretName: $(params.ENTITLEMENT_SECRET)
optional: true
+ - name: activation-key
+ secret:
+ optional: true
+ secretName: $(params.ACTIVATION_KEY)
- name: additional-secret
secret:
secretName: $(params.ADDITIONAL_SECRET)
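With the change above, the activation key is available to `buildah build` as a volume at `/activation-key`, but registering with it is left to the Containerfile. A sketch of the shell a `RUN` instruction might execute, assuming the Secret provides `org` and `activationkey` files (an assumption; use whatever layout your Secret actually has):

```bash
# Hypothetical RUN body for a Containerfile built by this task; the task only
# provides the /activation-key mount, it does not register the build for you.
subscription-manager register \
  --org "$(cat /activation-key/org)" \
  --activationkey "$(cat /activation-key/activationkey)"
dnf -y install some-entitled-package   # hypothetical package name
subscription-manager unregister
```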
diff --git a/task/buildah/0.2/MIGRATION.md b/task/buildah/0.2/MIGRATION.md
new file mode 100644
index 0000000000..e1f48756aa
--- /dev/null
+++ b/task/buildah/0.2/MIGRATION.md
@@ -0,0 +1,49 @@
+# Migration from 0.1 to 0.2
+
+Version 0.2:
+
+* Removes the `BASE_IMAGES_DIGESTS` result. Please remove all the references to this
+ result from your pipeline.
+ * Base images and their digests can be found in the SBOM for the output image.
+* No longer writes the `base_images_from_dockerfile` file into the `source` workspace.
+* Removes the `BUILDER_IMAGE` and `DOCKER_AUTH` params. Neither one did anything
+ in the later releases of version 0.1. Please stop passing these params to the
+ buildah task if you used to do so with version 0.1.
+
+## Konflux-specific
+
+In a typical Konflux pipeline, the two tasks that used to depend on the `BASE_IMAGES_DIGESTS`
+result are `build-source-image` and `deprecated-base-image-check`.
+
+1. Make sure your version of `deprecated-base-image-check` is at least `0.4`.
+2. Make sure your version of `build-source-image` supports reading base images from
+ the SBOM. Version `0.1` supports it since 2024-07-15. In the logs of your build
+ pipeline, you should see that the build-source-image task now has a GET-BASE-IMAGES
+ step. Once you stop passing the `BASE_IMAGES_DIGESTS` param, this step will emit
+ logs about handling the SBOM.
+3. Remove the parameters that reference the `BASE_IMAGES_DIGESTS` result:
+
+```diff
+@@ -255,10 +255,8 @@ spec:
+ - name: build-source-image
+ params:
+ - name: BINARY_IMAGE
+ value: $(params.output-image)
+- - name: BASE_IMAGES
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ runAfter:
+ - build-container
+ taskRef:
+ params:
+@@ -282,10 +280,8 @@ spec:
+ - name: workspace
+ workspace: workspace
+ - name: deprecated-base-image-check
+ params:
+- - name: BASE_IMAGES_DIGESTS
+- value: $(tasks.build-container.results.BASE_IMAGES_DIGESTS)
+ - name: IMAGE_URL
+ value: $(tasks.build-container.results.IMAGE_URL)
+ - name: IMAGE_DIGEST
+ value: $(tasks.build-container.results.IMAGE_DIGEST)
+```
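+
+A quick, non-authoritative way to check which versions of these tasks your pipeline
+references is to search your pipeline definition. The sketch below assumes the
+definition lives under `.tekton/` in your repository; adjust the paths to your layout.
+
+```bash
+# Show the taskRef blocks for the two affected tasks so their versions can be reviewed.
+grep -A5 -e 'deprecated-image-check' -e 'build-source-image' .tekton/*.yaml
+```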
diff --git a/task/buildah/0.2/README.md b/task/buildah/0.2/README.md
new file mode 100644
index 0000000000..a9da914b81
--- /dev/null
+++ b/task/buildah/0.2/README.md
@@ -0,0 +1,48 @@
+# buildah task
+
+Buildah task builds source code into a container image and pushes the image to a container registry using the buildah tool.
+In addition, it generates an SBOM file, injects the SBOM file into the final container image, and pushes the SBOM file as a separate image using the cosign tool.
+When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled, it triggers rebuilds of Java artifacts.
+When the prefetch-dependencies task is activated, its artifacts are used to run the build in a hermetic environment.
+
+## Parameters
+|name|description|default value|required|
+|---|---|---|---|
+|IMAGE|Reference of the image buildah will produce.||true|
+|DOCKERFILE|Path to the Dockerfile to build.|./Dockerfile|false|
+|CONTEXT|Path to the directory to use as context.|.|false|
+|TLSVERIFY|Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)|true|false|
+|HERMETIC|Determines if build will be executed without network access.|false|false|
+|PREFETCH_INPUT|In case it is not empty, the prefetched content should be made available to the build.|""|false|
+|IMAGE_EXPIRES_AFTER|Delete image tag after specified time. Empty means to keep the image tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.|""|false|
+|COMMIT_SHA|The image is built from this commit.|""|false|
+|YUM_REPOS_D_SRC|Path in the git repository in which yum repository files are stored|repos.d|false|
+|YUM_REPOS_D_FETCHED|Path in source workspace where dynamically-fetched repos are present|fetched.repos.d|false|
+|YUM_REPOS_D_TARGET|Target path on the container in which yum repository files should be made available|/etc/yum.repos.d|false|
+|TARGET_STAGE|Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage.|""|false|
+|ENTITLEMENT_SECRET|Name of secret which contains the entitlement certificates|etc-pki-entitlement|false|
+|ACTIVATION_KEY|Name of secret which contains subscription activation key|activation-key|false|
+|ADDITIONAL_SECRET|Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET|does-not-exist|false|
+|BUILD_ARGS|Array of --build-arg values ("arg=value" strings)|[]|false|
+|BUILD_ARGS_FILE|Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file|""|false|
+|caTrustConfigMapName|The name of the ConfigMap to read CA bundle data from.|trusted-ca|false|
+|caTrustConfigMapKey|The name of the key in the ConfigMap that contains the CA bundle data.|ca-bundle.crt|false|
+|ADD_CAPABILITIES|Comma separated list of extra capabilities to add when running 'buildah build'|""|false|
+|SQUASH|Squash all new and previous layers added as a part of this build, as per --squash|false|false|
+|STORAGE_DRIVER|Storage driver to configure for buildah|vfs|false|
+|SKIP_UNUSED_STAGES|Whether to skip stages in Containerfile that seem unused by subsequent stages|true|false|
+
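+The `ENTITLEMENT_SECRET`, `ACTIVATION_KEY`, and `ADDITIONAL_SECRET` parameters only name
+Kubernetes secrets; each secret is optional and is mounted into the build only when it
+exists. As a hypothetical example, an activation-key secret could be created as shown
+below. The key names (`org`, `activationkey`) are placeholders: what your build actually
+reads from `/activation-key` depends on your Containerfile.
+
+```bash
+# Create the secret referenced by the ACTIVATION_KEY param (default name: activation-key)
+# in the namespace where the build pipeline runs. All values are placeholders.
+kubectl create secret generic activation-key \
+  --from-literal=org=<organization-id> \
+  --from-literal=activationkey=<activation-key-name>
+```
+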
+## Results
+|name|description|
+|---|---|
+|IMAGE_DIGEST|Digest of the image just built|
+|IMAGE_URL|Image repository where the built image was pushed|
+|SBOM_JAVA_COMPONENTS_COUNT|The counting of Java components by publisher in JSON format|
+|JAVA_COMMUNITY_DEPENDENCIES|The Java dependencies that came from community sources such as Maven central.|
+
+## Workspaces
+|name|description|optional|
+|---|---|---|
+|source|Workspace containing the source code to build.|false|
diff --git a/task/buildah/0.2/buildah.yaml b/task/buildah/0.2/buildah.yaml
new file mode 100644
index 0000000000..ef88ff4d0c
--- /dev/null
+++ b/task/buildah/0.2/buildah.yaml
@@ -0,0 +1,556 @@
+apiVersion: tekton.dev/v1
+kind: Task
+metadata:
+ labels:
+ app.kubernetes.io/version: "0.2"
+ build.appstudio.redhat.com/build_type: "docker"
+ annotations:
+ tekton.dev/pipelines.minVersion: "0.12.1"
+ tekton.dev/tags: "image-build, konflux"
+ name: buildah
+spec:
+ description: |-
+    Buildah task builds source code into a container image and pushes the image to a container registry using the buildah tool.
+    In addition, it generates an SBOM file, injects the SBOM file into the final container image, and pushes the SBOM file as a separate image using the cosign tool.
+    When [Java dependency rebuild](https://redhat-appstudio.github.io/docs.stonesoup.io/Documentation/main/cli/proc_enabled_java_dependencies.html) is enabled, it triggers rebuilds of Java artifacts.
+    When the prefetch-dependencies task is activated, its artifacts are used to run the build in a hermetic environment.
+ params:
+ - description: Reference of the image buildah will produce.
+ name: IMAGE
+ type: string
+ - default: ./Dockerfile
+ description: Path to the Dockerfile to build.
+ name: DOCKERFILE
+ type: string
+ - default: .
+ description: Path to the directory to use as context.
+ name: CONTEXT
+ type: string
+ - default: "true"
+ description: Verify the TLS on the registry endpoint (for push/pull to a non-TLS registry)
+ name: TLSVERIFY
+ type: string
+ - default: "false"
+ description: Determines if build will be executed without network access.
+ name: HERMETIC
+ type: string
+ - default: ""
+ description: In case it is not empty, the prefetched content should be made available to the build.
+ name: PREFETCH_INPUT
+ type: string
+ - default: ""
+ description: Delete image tag after specified time. Empty means to keep the image tag. Time values could be something like 1h, 2d, 3w for hours, days, and weeks, respectively.
+ name: IMAGE_EXPIRES_AFTER
+ type: string
+ - name: COMMIT_SHA
+ description: The image is built from this commit.
+ type: string
+ default: ""
+ - name: YUM_REPOS_D_SRC
+ description: Path in the git repository in which yum repository files are stored
+ default: repos.d
+ - name: YUM_REPOS_D_FETCHED
+ description: Path in source workspace where dynamically-fetched repos are present
+ default: fetched.repos.d
+ - name: YUM_REPOS_D_TARGET
+ description: Target path on the container in which yum repository files should be made available
+ default: /etc/yum.repos.d
+ - name: TARGET_STAGE
+ description: Target stage in Dockerfile to build. If not specified, the Dockerfile is processed entirely to (and including) its last stage.
+ type: string
+ default: ""
+ - name: ENTITLEMENT_SECRET
+ description: Name of secret which contains the entitlement certificates
+ type: string
+ default: "etc-pki-entitlement"
+ - name: ACTIVATION_KEY
+ default: activation-key
+ description: Name of secret which contains subscription activation key
+ type: string
+ - name: ADDITIONAL_SECRET
+ description: Name of a secret which will be made available to the build with 'buildah build --secret' at /run/secrets/$ADDITIONAL_SECRET
+ type: string
+ default: "does-not-exist"
+ - name: BUILD_ARGS
+ description: Array of --build-arg values ("arg=value" strings)
+ type: array
+ default: []
+ - name: BUILD_ARGS_FILE
+ description: Path to a file with build arguments, see https://www.mankier.com/1/buildah-build#--build-arg-file
+ type: string
+ default: ""
+ - name: caTrustConfigMapName
+ type: string
+ description: The name of the ConfigMap to read CA bundle data from.
+ default: trusted-ca
+ - name: caTrustConfigMapKey
+ type: string
+ description: The name of the key in the ConfigMap that contains the CA bundle data.
+ default: ca-bundle.crt
+ - name: ADD_CAPABILITIES
+ description: Comma separated list of extra capabilities to add when running 'buildah build'
+ type: string
+ default: ""
+ - name: SQUASH
+ description: Squash all new and previous layers added as a part of this build, as per --squash
+ type: string
+ default: "false"
+ - name: STORAGE_DRIVER
+ description: Storage driver to configure for buildah
+ type: string
+ default: vfs
+ - name: SKIP_UNUSED_STAGES
+ description: Whether to skip stages in Containerfile that seem unused by subsequent stages
+ type: string
+ default: "true"
+
+ results:
+ - description: Digest of the image just built
+ name: IMAGE_DIGEST
+ - description: Image repository where the built image was pushed
+ name: IMAGE_URL
+ - name: SBOM_JAVA_COMPONENTS_COUNT
+ description: The counting of Java components by publisher in JSON format
+ type: string
+ - name: JAVA_COMMUNITY_DEPENDENCIES
+ description: The Java dependencies that came from community sources such as Maven central.
+ stepTemplate:
+ volumeMounts:
+ - mountPath: /shared
+ name: shared
+ env:
+ - name: BUILDAH_FORMAT
+ value: oci
+ - name: STORAGE_DRIVER
+ value: $(params.STORAGE_DRIVER)
+ - name: HERMETIC
+ value: $(params.HERMETIC)
+ - name: CONTEXT
+ value: $(params.CONTEXT)
+ - name: DOCKERFILE
+ value: $(params.DOCKERFILE)
+ - name: IMAGE
+ value: $(params.IMAGE)
+ - name: TLSVERIFY
+ value: $(params.TLSVERIFY)
+ - name: IMAGE_EXPIRES_AFTER
+ value: $(params.IMAGE_EXPIRES_AFTER)
+ - name: YUM_REPOS_D_SRC
+ value: $(params.YUM_REPOS_D_SRC)
+ - name: YUM_REPOS_D_FETCHED
+ value: $(params.YUM_REPOS_D_FETCHED)
+ - name: YUM_REPOS_D_TARGET
+ value: $(params.YUM_REPOS_D_TARGET)
+ - name: TARGET_STAGE
+ value: $(params.TARGET_STAGE)
+ - name: ENTITLEMENT_SECRET
+ value: $(params.ENTITLEMENT_SECRET)
+ - name: ACTIVATION_KEY
+ value: $(params.ACTIVATION_KEY)
+ - name: ADDITIONAL_SECRET
+ value: $(params.ADDITIONAL_SECRET)
+ - name: BUILD_ARGS_FILE
+ value: $(params.BUILD_ARGS_FILE)
+ - name: ADD_CAPABILITIES
+ value: $(params.ADD_CAPABILITIES)
+ - name: SQUASH
+ value: $(params.SQUASH)
+ - name: SKIP_UNUSED_STAGES
+ value: $(params.SKIP_UNUSED_STAGES)
+
+ steps:
+ - image: quay.io/konflux-ci/buildah:latest@sha256:9ef792d74bcc1d330de6be58b61f2cdbfa1c23b74a291eb2136ffd452d373050
+ name: build
+ computeResources:
+ limits:
+ memory: 4Gi
+ requests:
+ memory: 512Mi
+ cpu: 250m
+ env:
+ - name: COMMIT_SHA
+ value: $(params.COMMIT_SHA)
+ args:
+ - $(params.BUILD_ARGS[*])
+ script: |
+ ca_bundle=/mnt/trusted-ca/ca-bundle.crt
+ if [ -f "$ca_bundle" ]; then
+ echo "INFO: Using mounted CA bundle: $ca_bundle"
+ cp -vf $ca_bundle /etc/pki/ca-trust/source/anchors
+ update-ca-trust
+ fi
+
+ SOURCE_CODE_DIR=source
+ if [ -e "$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE" ]; then
+ dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$CONTEXT/$DOCKERFILE"
+ elif [ -e "$SOURCE_CODE_DIR/$DOCKERFILE" ]; then
+ dockerfile_path="$(pwd)/$SOURCE_CODE_DIR/$DOCKERFILE"
+ elif echo "$DOCKERFILE" | grep -q "^https\?://"; then
+ echo "Fetch Dockerfile from $DOCKERFILE"
+ dockerfile_path=$(mktemp --suffix=-Dockerfile)
+ http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path" "$DOCKERFILE")
+ if [ $http_code != 200 ]; then
+ echo "No Dockerfile is fetched. Server responds $http_code"
+ exit 1
+ fi
+ http_code=$(curl -s -L -w "%{http_code}" --output "$dockerfile_path.dockerignore.tmp" "$DOCKERFILE.dockerignore")
+ if [ $http_code = 200 ]; then
+ echo "Fetched .dockerignore from $DOCKERFILE.dockerignore"
+ mv "$dockerfile_path.dockerignore.tmp" $SOURCE_CODE_DIR/$CONTEXT/.dockerignore
+ fi
+ else
+ echo "Cannot find Dockerfile $DOCKERFILE"
+ exit 1
+ fi
+ if [ -n "$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR" ] && grep -q '^\s*RUN \(./\)\?mvn' "$dockerfile_path"; then
+        sed -i -e "s|^\s*RUN \(\(./\)\?mvn\)\(.*\)|RUN echo \"mirror.default=http://$JVM_BUILD_WORKSPACE_ARTIFACT_CACHE_PORT_80_TCP_ADDR/v1/cache/default/0/*\" > /tmp/settings.yaml; \1 -s /tmp/settings.yaml \3|g" "$dockerfile_path"
+ touch /var/lib/containers/java
+ fi
+
+ # Fixing group permission on /var/lib/containers
+ chown root:root /var/lib/containers
+
+ sed -i 's/^\s*short-name-mode\s*=\s*.*/short-name-mode = "disabled"/' /etc/containers/registries.conf
+
+ # Setting new namespace to run buildah - 2^32-2
+ echo 'root:1:4294967294' | tee -a /etc/subuid >> /etc/subgid
+
+ BUILDAH_ARGS=()
+
+ BASE_IMAGES=$(grep -i '^\s*FROM' "$dockerfile_path" | sed 's/--platform=\S*//' | awk '{print $2}' | (grep -v ^oci-archive: || true))
+ if [ "${HERMETIC}" == "true" ]; then
+ BUILDAH_ARGS+=("--pull=never")
+ UNSHARE_ARGS="--net"
+ for image in $BASE_IMAGES; do
+ if [ "${image}" != "scratch" ]; then
+ unshare -Ufp --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -- buildah pull $image
+ fi
+ done
+ echo "Build will be executed with network isolation"
+ fi
+
+ if [ -n "${TARGET_STAGE}" ]; then
+ BUILDAH_ARGS+=("--target=${TARGET_STAGE}")
+ fi
+
+ if [ -n "${BUILD_ARGS_FILE}" ]; then
+ BUILDAH_ARGS+=("--build-arg-file=$(pwd)/$SOURCE_CODE_DIR/${BUILD_ARGS_FILE}")
+ fi
+
+ for build_arg in "$@"; do
+ BUILDAH_ARGS+=("--build-arg=$build_arg")
+ done
+
+ if [ -n "${ADD_CAPABILITIES}" ]; then
+ BUILDAH_ARGS+=("--cap-add=${ADD_CAPABILITIES}")
+ fi
+
+ if [ "${SQUASH}" == "true" ]; then
+ BUILDAH_ARGS+=("--squash")
+ fi
+
+ if [ "${SKIP_UNUSED_STAGES}" != "true" ] ; then
+ BUILDAH_ARGS+=("--skip-unused-stages=false")
+ fi
+
+ if [ -f "$(workspaces.source.path)/cachi2/cachi2.env" ]; then
+ cp -r "$(workspaces.source.path)/cachi2" /tmp/
+ chmod -R go+rwX /tmp/cachi2
+ VOLUME_MOUNTS="--volume /tmp/cachi2:/cachi2"
+ sed -i 's|^\s*run |RUN . /cachi2/cachi2.env \&\& \\\n |i' "$dockerfile_path"
+ echo "Prefetched content will be made available"
+
+ prefetched_repo_for_my_arch="/tmp/cachi2/output/deps/rpm/$(uname -m)/repos.d/cachi2.repo"
+ if [ -f "$prefetched_repo_for_my_arch" ]; then
+ echo "Adding $prefetched_repo_for_my_arch to $YUM_REPOS_D_FETCHED"
+ mkdir -p "$YUM_REPOS_D_FETCHED"
+ cp --no-clobber "$prefetched_repo_for_my_arch" "$YUM_REPOS_D_FETCHED"
+ fi
+ fi
+
+ # if yum repofiles stored in git, copy them to mount point outside the source dir
+ if [ -d "${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}" ]; then
+ mkdir -p ${YUM_REPOS_D_FETCHED}
+ cp -r ${SOURCE_CODE_DIR}/${YUM_REPOS_D_SRC}/* ${YUM_REPOS_D_FETCHED}
+ fi
+
+ # if anything in the repofiles mount point (either fetched or from git), mount it
+ if [ -d "${YUM_REPOS_D_FETCHED}" ]; then
+ chmod -R go+rwX ${YUM_REPOS_D_FETCHED}
+ mount_point=$(realpath ${YUM_REPOS_D_FETCHED})
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume ${mount_point}:${YUM_REPOS_D_TARGET}"
+ fi
+
+ LABELS=(
+ "--label" "build-date=$(date -u +'%Y-%m-%dT%H:%M:%S')"
+ "--label" "architecture=$(uname -m)"
+ "--label" "vcs-type=git"
+ )
+ [ -n "$COMMIT_SHA" ] && LABELS+=("--label" "vcs-ref=$COMMIT_SHA")
+ [ -n "$IMAGE_EXPIRES_AFTER" ] && LABELS+=("--label" "quay.expires-after=$IMAGE_EXPIRES_AFTER")
+
+ ENTITLEMENT_PATH="/entitlement"
+ if [ -d "$ENTITLEMENT_PATH" ]; then
+ cp -r --preserve=mode "$ENTITLEMENT_PATH" /tmp/entitlement
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/entitlement:/etc/pki/entitlement"
+ echo "Adding the entitlement to the build"
+ fi
+
+ ACTIVATION_KEY_PATH="/activation-key"
+ if [ -d "$ACTIVATION_KEY_PATH" ]; then
+ cp -r --preserve=mode "$ACTIVATION_KEY_PATH" /tmp/activation-key
+ VOLUME_MOUNTS="${VOLUME_MOUNTS} --volume /tmp/activation-key:/activation-key"
+ echo "Adding activation key to the build"
+ fi
+
+ ADDITIONAL_SECRET_PATH="/additional-secret"
+ ADDITIONAL_SECRET_TMP="/tmp/additional-secret"
+ if [ -d "$ADDITIONAL_SECRET_PATH" ]; then
+ cp -r --preserve=mode -L "$ADDITIONAL_SECRET_PATH" $ADDITIONAL_SECRET_TMP
+ while read -r filename; do
+ echo "Adding the secret ${ADDITIONAL_SECRET}/${filename} to the build, available at /run/secrets/${ADDITIONAL_SECRET}/${filename}"
+ BUILDAH_ARGS+=("--secret=id=${ADDITIONAL_SECRET}/${filename},src=$ADDITIONAL_SECRET_TMP/${filename}")
+ done < <(find $ADDITIONAL_SECRET_TMP -maxdepth 1 -type f -exec basename {} \;)
+ fi
+
+ unshare -Uf $UNSHARE_ARGS --keep-caps -r --map-users 1,1,65536 --map-groups 1,1,65536 -w ${SOURCE_CODE_DIR}/$CONTEXT -- buildah build \
+ $VOLUME_MOUNTS \
+ "${BUILDAH_ARGS[@]}" \
+ "${LABELS[@]}" \
+ --tls-verify=$TLSVERIFY --no-cache \
+ --ulimit nofile=4096:4096 \
+ -f "$dockerfile_path" -t $IMAGE .
+
+ container=$(buildah from --pull-never $IMAGE)
+ buildah mount $container | tee /shared/container_path
+ echo $container > /shared/container_name
+
+ # Save the SBOM produced by Cachi2 so it can be merged into the final SBOM later
+ if [ -f "/tmp/cachi2/output/bom.json" ]; then
+ cp /tmp/cachi2/output/bom.json ./sbom-cachi2.json
+ fi
+
+ for image in $BASE_IMAGES; do
+ if [ "${image}" != "scratch" ]; then
+ buildah images --format '{{ .Name }}:{{ .Tag }}@{{ .Digest }}' --filter reference="$image" >> /shared/base_images_digests
+ fi
+ done
+
+ # Needed to generate base images SBOM
+ echo "$BASE_IMAGES" > /shared/base_images_from_dockerfile
+
+ securityContext:
+ capabilities:
+ add:
+ - SETFCAP
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: "/entitlement"
+ name: etc-pki-entitlement
+ - mountPath: /activation-key
+ name: activation-key
+ - mountPath: "/additional-secret"
+ name: additional-secret
+ - name: trusted-ca
+ mountPath: /mnt/trusted-ca
+ readOnly: true
+ workingDir: $(workspaces.source.path)
+
+ - name: sbom-syft-generate
+ image: quay.io/redhat-appstudio/syft:v0.105.1@sha256:1910b829997650c696881e5fc2fc654ddf3184c27edb1b2024e9cb2ba51ac431
+ # Respect Syft configuration if the user has it in the root of their repository
+ # (need to set the workdir, see https://github.com/anchore/syft/issues/2465)
+ workingDir: $(workspaces.source.path)/source
+ script: |
+ echo "Running syft on the source directory"
+ syft dir:$(workspaces.source.path)/source --output cyclonedx-json=$(workspaces.source.path)/sbom-source.json
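+      # Remove broken symlinks from the mounted container filesystem before scanning it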
+ find $(cat /shared/container_path) -xtype l -delete
+ echo "Running syft on the image filesystem"
+ syft dir:$(cat /shared/container_path) --output cyclonedx-json=$(workspaces.source.path)/sbom-image.json
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /shared
+ name: shared
+ - name: analyse-dependencies-java-sbom
+ image: quay.io/redhat-appstudio/hacbs-jvm-build-request-processor:127ee0c223a2b56a9bd20a6f2eaeed3bd6015f77
+ script: |
+ if [ -f /var/lib/containers/java ]; then
+ /opt/jboss/container/java/run/run-java.sh analyse-dependencies path $(cat /shared/container_path) -s $(workspaces.source.path)/sbom-image.json --task-run-name $(context.taskRun.name) --publishers $(results.SBOM_JAVA_COMPONENTS_COUNT.path)
+ sed -i 's/^/ /' $(results.SBOM_JAVA_COMPONENTS_COUNT.path) # Workaround for SRVKP-2875
+ else
+ touch $(results.JAVA_COMMUNITY_DEPENDENCIES.path)
+ fi
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ - mountPath: /shared
+ name: shared
+ securityContext:
+ runAsUser: 0
+
+ - name: merge-syft-sboms
+ image: registry.access.redhat.com/ubi9/python-39:1-172.1712567222@sha256:c96f839e927c52990143df4efb2872946fcd5de9e1ed2014947bb2cf3084c27a
+ script: |
+ #!/bin/python3
+ import json
+
+ # load SBOMs
+ with open("./sbom-image.json") as f:
+ image_sbom = json.load(f)
+
+ with open("./sbom-source.json") as f:
+ source_sbom = json.load(f)
+
+ # fetch unique components from available SBOMs
+ def get_identifier(component):
+ return component["name"] + '@' + component.get("version", "")
+
+ image_sbom_components = image_sbom.setdefault("components", [])
+ existing_components = [get_identifier(component) for component in image_sbom_components]
+
+ source_sbom_components = source_sbom.get("components", [])
+ for component in source_sbom_components:
+ if get_identifier(component) not in existing_components:
+ image_sbom_components.append(component)
+ existing_components.append(get_identifier(component))
+
+ image_sbom_components.sort(key=lambda c: get_identifier(c))
+
+ # write the CycloneDX unified SBOM
+ with open("./sbom-cyclonedx.json", "w") as f:
+ json.dump(image_sbom, f, indent=4)
+ workingDir: $(workspaces.source.path)
+ securityContext:
+ runAsUser: 0
+
+ - name: merge-cachi2-sbom
+ image: quay.io/redhat-appstudio/cachi2:0.8.0@sha256:5cf15d6f3fb151a3e12c8a17024062b7cc62b0c3e1b165e4a9fa5bf7a77bdc30
+ script: |
+ if [ -f "sbom-cachi2.json" ]; then
+ echo "Merging contents of sbom-cachi2.json into sbom-cyclonedx.json"
+ /src/utils/merge_syft_sbom.py sbom-cachi2.json sbom-cyclonedx.json > sbom-temp.json
+ mv sbom-temp.json sbom-cyclonedx.json
+ else
+ echo "Skipping step since no Cachi2 SBOM was produced"
+ fi
+ workingDir: $(workspaces.source.path)
+ securityContext:
+ runAsUser: 0
+
+ - name: create-purl-sbom
+ image: registry.access.redhat.com/ubi9/python-39:1-172.1712567222@sha256:c96f839e927c52990143df4efb2872946fcd5de9e1ed2014947bb2cf3084c27a
+ script: |
+ #!/bin/python3
+ import json
+
+ with open("./sbom-cyclonedx.json") as f:
+ cyclonedx_sbom = json.load(f)
+
+ purls = [{"purl": component["purl"]} for component in cyclonedx_sbom.get("components", []) if "purl" in component]
+ purl_content = {"image_contents": {"dependencies": purls}}
+
+ with open("sbom-purl.json", "w") as output_file:
+ json.dump(purl_content, output_file, indent=4)
+ workingDir: $(workspaces.source.path)
+ securityContext:
+ runAsUser: 0
+
+ - name: create-base-images-sbom
+ image: quay.io/redhat-appstudio/base-images-sbom-script@sha256:667669e3def018f9dbb8eaf8868887a40bc07842221e9a98f6787edcff021840
+ script: |
+ python3 /app/base_images_sbom_script.py \
+ --sbom=sbom-cyclonedx.json \
+ --base-images-from-dockerfile=/shared/base_images_from_dockerfile \
+ --base-images-digests=/shared/base_images_digests
+ workingDir: $(workspaces.source.path)
+ securityContext:
+ runAsUser: 0
+
+ - name: inject-sbom-and-push
+ image: quay.io/konflux-ci/buildah:latest@sha256:9ef792d74bcc1d330de6be58b61f2cdbfa1c23b74a291eb2136ffd452d373050
+ computeResources: {}
+ script: |
+ base_image_name=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.name"}}' $IMAGE | cut -f1 -d'@')
+ base_image_digest=$(buildah inspect --format '{{ index .ImageAnnotations "org.opencontainers.image.base.digest"}}' $IMAGE)
+ container=$(buildah from --pull-never $IMAGE)
+ buildah copy $container sbom-cyclonedx.json sbom-purl.json /root/buildinfo/content_manifests/
+ buildah config -a org.opencontainers.image.base.name=${base_image_name} -a org.opencontainers.image.base.digest=${base_image_digest} $container
+
+ BUILDAH_ARGS=()
+ if [ "${SQUASH}" == "true" ]; then
+ BUILDAH_ARGS+=("--squash")
+ fi
+
+ buildah commit "${BUILDAH_ARGS[@]}" $container $IMAGE
+
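+      # Push the image, retrying up to max_run times with sleep_sec seconds between attempts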
+ status=-1
+ max_run=5
+ sleep_sec=10
+ for run in $(seq 1 $max_run); do
+ status=0
+ [ "$run" -gt 1 ] && sleep $sleep_sec
+ echo "Pushing sbom image to registry"
+ buildah push \
+ --tls-verify=$TLSVERIFY \
+ --digestfile $(workspaces.source.path)/image-digest $IMAGE \
+ docker://$IMAGE && break || status=$?
+ done
+ if [ "$status" -ne 0 ]; then
+ echo "Failed to push sbom image to registry after ${max_run} tries"
+ exit 1
+ fi
+
+ cat "$(workspaces.source.path)"/image-digest | tee $(results.IMAGE_DIGEST.path)
+ echo -n "$IMAGE" | tee $(results.IMAGE_URL.path)
+
+ securityContext:
+ runAsUser: 0
+ capabilities:
+ add:
+ - SETFCAP
+ volumeMounts:
+ - mountPath: /var/lib/containers
+ name: varlibcontainers
+ workingDir: $(workspaces.source.path)
+
+ - name: upload-sbom
+ image: quay.io/redhat-appstudio/cosign:v2.1.1@sha256:c883d6f8d39148f2cea71bff4622d196d89df3e510f36c140c097b932f0dd5d5
+ args:
+ - attach
+ - sbom
+ - --sbom
+ - sbom-cyclonedx.json
+ - --type
+ - cyclonedx
+ - $(params.IMAGE)
+ workingDir: $(workspaces.source.path)
+
+ volumes:
+ - name: varlibcontainers
+ emptyDir: {}
+ - name: shared
+ emptyDir: {}
+ - name: etc-pki-entitlement
+ secret:
+ secretName: $(params.ENTITLEMENT_SECRET)
+ optional: true
+ - name: activation-key
+ secret:
+ optional: true
+ secretName: $(params.ACTIVATION_KEY)
+ - name: additional-secret
+ secret:
+ secretName: $(params.ADDITIONAL_SECRET)
+ optional: true
+ - name: trusted-ca
+ configMap:
+ name: $(params.caTrustConfigMapName)
+ items:
+ - key: $(params.caTrustConfigMapKey)
+ path: ca-bundle.crt
+ optional: true
+ workspaces:
+ - name: source
+ description: Workspace containing the source code to build.
diff --git a/task/buildah/0.2/kustomization.yaml b/task/buildah/0.2/kustomization.yaml
new file mode 100644
index 0000000000..6a3c230a1f
--- /dev/null
+++ b/task/buildah/0.2/kustomization.yaml
@@ -0,0 +1,5 @@
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- buildah.yaml